xref: /original-bsd/sys/vax/uba/uba.c (revision 8e206d2f)
1 /*
2  * Copyright (c) 1982, 1986 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms are permitted
6  * provided that the above copyright notice and this paragraph are
7  * duplicated in all such forms and that any documentation,
8  * advertising materials, and other materials related to such
9  * distribution and use acknowledge that the software was developed
10  * by the University of California, Berkeley.  The name of the
11  * University may not be used to endorse or promote products derived
12  * from this software without specific prior written permission.
13  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
16  *
17  *	@(#)uba.c	7.8 (Berkeley) 02/17/90
18  */
19 
20 #include "param.h"
21 #include "systm.h"
22 #include "map.h"
23 #include "buf.h"
24 #include "vm.h"
25 #include "user.h"
26 #include "proc.h"
27 #include "conf.h"
28 #include "dkstat.h"
29 #include "kernel.h"
30 
31 #include "../vax/pte.h"
32 #include "../vax/cpu.h"
33 #include "../vax/mtpr.h"
34 #include "../vax/nexus.h"
35 #include "ubareg.h"
36 #include "ubavar.h"
37 
#ifdef DW780
/* bit names used to print the DW780 status register via %b (see ubaerror) */
char	ubasr_bits[] = UBASR_BITS;
#endif

#define	spluba	spl7		/* IPL 17 */
43 
44 /*
45  * Do transfer on device argument.  The controller
46  * and uba involved are implied by the device.
47  * We queue for resource wait in the uba code if necessary.
48  * We return 1 if the transfer was started, 0 if it was not.
49  *
50  * The onq argument must be zero iff the device is not on the
51  * queue for this UBA.  If onq is set, the device must be at the
52  * head of the queue.  In any case, if the transfer is started,
53  * the device will be off the queue, and if not, it will be on.
54  *
55  * Drivers that allocate one BDP and hold it for some time should
56  * set ud_keepbdp.  In this case um_bdp tells which BDP is allocated
57  * to the controller, unless it is zero, indicating that the controller
58  * does not now have a BDP.
59  */
ubaqueue(ui, onq)
	register struct uba_device *ui;
	int onq;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register struct uba_driver *ud;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	ud = um->um_driver;
	s = spluba();
	/*
	 * Honor exclusive BDP use requests: wait if this driver wants
	 * the UBA exclusively while others are active, or if some other
	 * driver already holds the UBA exclusively.
	 * (Precedence: (ud->ud_xclu && uh->uh_users > 0) || uh->uh_xclu.)
	 */
	if (ud->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	if (ud->ud_keepbdp) {
		/*
		 * First get just a BDP (though in fact it comes with
		 * one map register too).
		 */
		if (um->um_bdp == 0) {
			um->um_bdp = uballoc(um->um_ubanum,
				(caddr_t)0, 0, UBA_NEEDBDP|UBA_CANTWAIT);
			if (um->um_bdp == 0)
				goto rwait;
		}
		/* now share it with this transfer */
		um->um_ubinfo = ubasetup(um->um_ubanum,
			um->um_tab.b_actf->b_actf,
			um->um_bdp|UBA_HAVEBDP|UBA_CANTWAIT);
	} else
		um->um_ubinfo = ubasetup(um->um_ubanum,
			um->um_tab.b_actf->b_actf, UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (ud->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		/* transfer started: update the dk* instrumentation slot */
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		/* >>6 scales the byte count down by 64 for the dk_wds stat */
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	/* if we were at the head of the resource wait queue, unlink us */
	if (onq)
		uh->uh_actf = ui->ui_forw;
	(*ud->ud_dgo)(um);
	return (1);
rwait:
	/* resources unavailable: append device to this UBA's wait queue */
	if (!onq) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}
123 
124 ubadone(um)
125 	register struct uba_ctlr *um;
126 {
127 	register struct uba_hd *uh = &uba_hd[um->um_ubanum];
128 
129 	if (um->um_driver->ud_xclu)
130 		uh->uh_xclu = 0;
131 	uh->uh_users--;
132 	if (um->um_driver->ud_keepbdp)
133 		um->um_ubinfo &= ~BDPMASK;	/* keep BDP for misers */
134 	ubarelse(um->um_ubanum, &um->um_ubinfo);
135 }
136 
137 /*
138  * Allocate and setup UBA map registers, and bdp's
139  * Flags says whether bdp is needed, whether the caller can't
140  * wait (e.g. if the caller is at interrupt level).
141  * Return value encodes map register plus page offset,
142  * bdp number and number of map registers.
143  */
ubasetup(uban, bp, flags)
	int uban;
	register struct buf *bp;
	register int flags;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register struct pte *pte, *io;
	register int npf;
	int pfnum, temp;
	int reg, bdp;
	unsigned v;
	struct proc *rp;
	int a, o, ubinfo;

	/* these adapter types take no buffered data paths; drop the request */
#ifdef DW730
	if (uh->uh_type == DW730)
		flags &= ~UBA_NEEDBDP;
#endif
#ifdef QBA
	if (uh->uh_type == QBA)
		flags &= ~UBA_NEEDBDP;
#endif
	/* pages spanned by the transfer, plus one for the zeroed
	 * guard entry written at the end of the map below */
	o = (int)bp->b_un.b_addr & PGOFSET;
	npf = btoc(bp->b_bcount + o) + 1;
	if (npf > UBA_MAXNMR)
		panic("uba xfer too big");
	a = spluba();
	/* grab npf contiguous map registers, sleeping unless CANTWAIT */
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	/* a device with only 16 address bits must map within the
	 * first 128 registers; fail rather than hand it a bad map */
	if ((flags & UBA_NEED16) && reg + npf > 128) {
		/*
		 * Could hang around and try again (if we can ever succeed).
		 * Won't help any current device...
		 */
		rmfree(uh->uh_map, (long)npf, (long)reg);
		splx(a);
		return (0);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		/* pick the lowest free BDP; sleep unless CANTWAIT */
		while ((bdp = ffs((long)uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		/* caller supplied its BDP number in the top 4 flag bits */
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;	/* resource map indices are 1-based */
	ubinfo = UBAI_INFO(o, reg, npf, bdp);
	/* template for each map register: BDP number + valid bit */
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;	/* odd byte offset through a BDP */
	/*
	 * Locate the page table entries describing the buffer:
	 * kernel virtual memory, page tables themselves (B_PAGET),
	 * or a user process's pages / u-area.
	 */
	if ((bp->b_flags & B_PHYS) == 0)
		pte = kvtopte(bp->b_un.b_addr);
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else {
		/* B_DIRTY pages belong to proc[2] (presumably the
		 * pageout daemon -- confirm against vm code) */
		rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
		v = btop(bp->b_un.b_addr);
		if (bp->b_flags & B_UAREA)
			pte = &rp->p_addr[v];
		else
			pte = vtopte(rp, v);
	}
	/* copy the page frame numbers into the adapter's map registers */
	io = &uh->uh_mr[reg];
	while (--npf > 0) {
		pfnum = pte->pg_pfnum;
		if (pfnum == 0)
			panic("uba zero uentry");
		pte++;
		*(int *)io++ = pfnum | temp;
	}
	*(int *)io = 0;		/* invalid guard entry terminates the map */
	return (ubinfo);
}
231 
232 /*
233  * Non buffer setup interface... set up a buffer and call ubasetup.
234  */
235 uballoc(uban, addr, bcnt, flags)
236 	int uban;
237 	caddr_t addr;
238 	int bcnt, flags;
239 {
240 	struct buf ubabuf;
241 
242 	ubabuf.b_un.b_addr = addr;
243 	ubabuf.b_flags = B_BUSY;
244 	ubabuf.b_bcount = bcnt;
245 	/* that's all the fields ubasetup() needs */
246 	return (ubasetup(uban, &ubabuf, flags));
247 }
248 
249 /*
250  * Release resources on uba uban, and then unblock resource waiters.
251  * The map register parameter is by value since we need to block
252  * against uba resets on 11/780's.
253  */
254 ubarelse(uban, amr)
255 	int *amr;
256 {
257 	register struct uba_hd *uh = &uba_hd[uban];
258 	register int bdp, reg, npf, s;
259 	int mr;
260 
261 	/*
262 	 * Carefully see if we should release the space, since
263 	 * it may be released asynchronously at uba reset time.
264 	 */
265 	s = spluba();
266 	mr = *amr;
267 	if (mr == 0) {
268 		/*
269 		 * A ubareset() occurred before we got around
270 		 * to releasing the space... no need to bother.
271 		 */
272 		splx(s);
273 		return;
274 	}
275 	*amr = 0;
276 	bdp = UBAI_BDP(mr);
277 	if (bdp) {
278 		switch (uh->uh_type) {
279 #ifdef DWBUA
280 		case DWBUA:
281 			BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
282 			break;
283 #endif
284 #ifdef DW780
285 		case DW780:
286 			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
287 			break;
288 #endif
289 #ifdef DW750
290 		case DW750:
291 			uh->uh_uba->uba_dpr[bdp] |=
292 			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
293 			break;
294 #endif
295 		default:
296 			break;
297 		}
298 		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
299 		if (uh->uh_bdpwant) {
300 			uh->uh_bdpwant = 0;
301 			wakeup((caddr_t)&uh->uh_bdpwant);
302 		}
303 	}
304 	/*
305 	 * Put back the registers in the resource map.
306 	 * The map code must not be reentered,
307 	 * nor can the registers be freed twice.
308 	 * Unblock interrupts once this is done.
309 	 */
310 	npf = UBAI_NMR(mr);
311 	reg = UBAI_MR(mr) + 1;
312 	rmfree(uh->uh_map, (long)npf, (long)reg);
313 	splx(s);
314 
315 	/*
316 	 * Wakeup sleepers for map registers,
317 	 * and also, if there are processes blocked in dgo(),
318 	 * give them a chance at the UNIBUS.
319 	 */
320 	if (uh->uh_mrwant) {
321 		uh->uh_mrwant = 0;
322 		wakeup((caddr_t)&uh->uh_mrwant);
323 	}
324 	while (uh->uh_actf && ubaqueue(uh->uh_actf, 1))
325 		;
326 }
327 
/*
 * Purge (flush) the buffered data path currently assigned to
 * controller um, as recorded in um_ubinfo.  The register and bits
 * used depend on the adapter type; unlisted types are a no-op.
 */
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = UBAI_BDP(um->um_ubinfo);

	switch (uh->uh_type) {
#ifdef DWBUA
	case DWBUA:
		BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
		break;
#endif
#ifdef DW780
	case DW780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#ifdef DW750
	case DW750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	default:
		break;
	}
}
354 
/*
 * Initialize the map-register resource map and the free-BDP bit
 * mask for adapter uhp.  The number of buffered data paths depends
 * on the adapter type; unlisted types get none (mask left as is).
 */
ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	/* clamp to the number of map registers this code supports */
	if (uhp->uh_memsize > UBA_MAXMR)
		uhp->uh_memsize = UBA_MAXMR;
	rminit(uhp->uh_map, (long)uhp->uh_memsize, (long)1, "uba", UAMSIZ);
	switch (uhp->uh_type) {
#ifdef DWBUA
	case DWBUA:
		uhp->uh_bdpfree = (1<<NBDPBUA) - 1;
		break;
#endif
#ifdef DW780
	case DW780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#ifdef DW750
	case DW750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
	default:
		break;
	}
}
382 
383 /*
384  * Generate a reset on uba number uban.  Then
385  * call each device in the character device table,
386  * giving it a chance to clean up so as to be able to continue.
387  */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spluba();
	/* discard all outstanding resource state; any sleepers are
	 * woken below and must re-request their resources */
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	ubameminit(uban);
	/* give each character device driver a chance to recover */
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
	/* presumably resets UNIBUS network interfaces -- see if_uba */
	ifubareset(uban);
	printf("\n");
	splx(s);
}
414 
415 /*
416  * Init a uba.  This is called with a pointer
417  * rather than a virtual address since it is called
418  * by code which runs with memory mapping disabled.
419  * In these cases we really don't need the interrupts
420  * enabled, but since we run with ipl high, we don't care
421  * if they are, they will never happen anyways.
422  * SHOULD GET POINTER TO UBA_HD INSTEAD OF UBA.
423  */
ubainit(uba)
	register struct uba_regs *uba;
{
	register struct uba_hd *uhp;
#ifdef QBA
	int isphys = 0;
#endif

	/*
	 * Find the uba_hd for this adapter; the caller may pass either
	 * the mapped or the physical register address (see comment above).
	 */
	for (uhp = uba_hd; uhp < uba_hd + numuba; uhp++) {
		if (uhp->uh_uba == uba)
			break;
		if (uhp->uh_physuba == uba) {
#ifdef QBA
			isphys++;	/* caller used the physical address */
#endif
			break;
		}
	}
	if (uhp >= uba_hd + numuba) {
		printf("init unknown uba\n");
		return;
	}

	switch (uhp->uh_type) {
#ifdef DWBUA
	case DWBUA:
		BUA(uba)->bua_csr |= BUACSR_UPI;
		/* give devices time to recover from power fail */
		DELAY(500000);
		break;
#endif
#ifdef DW780
	case DW780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		/* busy-wait for UNIBUS initialization complete */
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#ifdef DW750
	case DW750:
#endif
#ifdef DW730
	case DW730:
#endif
#ifdef QBA
	case QBA:
#endif
#if DW750 || DW730 || QBA
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
#ifdef QBA
		/*
		 * Re-enable local memory access
		 * from the Q-bus.
		 */
		if (uhp->uh_type == QBA) {
			if (isphys)
				*((char *)QIOPAGE630 + QIPCR) = Q_LMEAE;
			else
				*(uhp->uh_iopage + QIPCR) = Q_LMEAE;
		}
#endif QBA
		break;
#endif DW750 || DW730 || QBA
	}
}
494 
#ifdef QBA
/*
 * Determine the interrupt priority of a Q-bus
 * peripheral.  The device probe routine must spl6(),
 * attempt to make the device request an interrupt,
 * delaying as necessary, then call this routine
 * before resetting the device.
 */
qbgetpri()
{
	int pri;
	/* cvec: interrupt vector recorded by the trap code elsewhere
	 * (presumably locore) -- 0x200 appears to mean "no real vector" */
	extern int cvec;

	/* lower the ipl one step at a time (0x17 down to 0x15) until
	 * the pending interrupt gets in; pri ends at the device's level */
	for (pri = 0x17; pri > 0x14; ) {
		if (cvec && cvec != 0x200)	/* interrupted at pri */
			break;
		pri--;
		splx(pri - 1);
	}
	(void) spl0();
	return (pri);
}
#endif
518 
#ifdef DW780
int	ubawedgecnt = 10;	/* reset the uba every this many errors */
int	ubacrazy = 500;		/* give up (panic) past this many errors */
int	zvcnt_max = 5000;	/* in 8 sec */
/*
 * This routine is called by the locore code to process a UBA
 * error on an 11/780 or 8600.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, ipl, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int ipl, uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		/*
		 * Zero vector: count them over (roughly) 8-second windows
		 * and reset the adapter if the rate exceeds zvcnt_max.
		 *
		 * Declare dt as unsigned so that negative values
		 * are handled as >8 below, in case time was set back.
		 */
		u_long	dt = time.tv_sec - uh->uh_zvtime;

		uh->uh_zvtotal++;
		if (dt > 8) {
			uh->uh_zvtime = time.tv_sec;
			uh->uh_zvcnt = 0;
		}
		if (++uh->uh_zvcnt > zvcnt_max) {
			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
				uban, uh->uh_zvcnt, dt + 1);
			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
				uba->uba_cnfgr&0xff);
			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
				uba->uba_sr, ubasr_bits, uba->uba_dcr,
				(uba->uba_dcr&0x8000000)?"":"NOT ");
			ubareset(uban);
		}
		return;
	}
	/* SBI fault: report, reset, and suppress further vector processing */
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;	/* value-result: tells locore to stop */
		return;
	}
	sr = uba->uba_sr;
	s = spluba();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;	/* write back to clear the error bits */
	uvec &= UBABRRVR_DIV;
	/* every ubawedgecnt errors, reset; past ubacrazy, give up */
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif
591 
592 /*
593  * Look for devices with unibus memory, allow them to configure, then disable
594  * map registers as necessary.  Called during autoconfiguration and ubareset.
595  * The device ubamem routine returns 0 on success, 1 on success if it is fully
596  * configured (has no csr or interrupt, so doesn't need to be probed),
597  * and -1 on failure.
598  */
599 ubameminit(uban)
600 {
601 	register struct uba_device *ui;
602 	register struct uba_hd *uh = &uba_hd[uban];
603 	caddr_t umembase = umem[uban] + 0x3e000, addr;
604 #define	ubaoff(off)	((int)(off) & 0x1fff)
605 
606 	uh->uh_lastmem = 0;
607 	for (ui = ubdinit; ui->ui_driver; ui++) {
608 		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
609 			continue;
610 		if (ui->ui_driver->ud_ubamem) {
611 			/*
612 			 * During autoconfiguration, need to fudge ui_addr.
613 			 */
614 			addr = ui->ui_addr;
615 			ui->ui_addr = umembase + ubaoff(addr);
616 			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
617 			case 1:
618 				ui->ui_alive = 1;
619 				/* FALLTHROUGH */
620 			case 0:
621 				ui->ui_ubanum = uban;
622 				break;
623 			}
624 			ui->ui_addr = addr;
625 		}
626 	}
627 #ifdef DW780
628 	/*
629 	 * On a DW780, throw away any map registers disabled by rounding
630 	 * the map disable in the configuration register
631 	 * up to the next 8K boundary, or below the last unibus memory.
632 	 */
633 	if (uh->uh_type == DW780) {
634 		register i;
635 
636 		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
637 		while (i)
638 			(void) rmget(uh->uh_map, 1, i--);
639 	}
640 #endif
641 }
642 
643 /*
644  * Allocate UNIBUS memory.  Allocates and initializes
645  * sufficient mapping registers for access.  On a 780,
646  * the configuration register is setup to disable UBA
647  * response on DMA transfers to addresses controlled
648  * by the disabled mapping registers.
649  * On a DW780, should only be called from ubameminit, or in ascending order
650  * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
651  * the last unibus memory would free unusable map registers.
652  * Doalloc is 1 to allocate, 0 to deallocate.
653  */
ubamem(uban, addr, npg, doalloc)
	int uban, addr, npg, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int a;
	int s;

	/* map register index: 512-byte pages, +1 because the resource
	 * map is 1-based (cf. the reg-- in ubasetup) */
	a = (addr >> 9) + 1;
	s = spluba();
	if (doalloc)
		a = rmget(uh->uh_map, npg, a);	/* these registers exactly */
	else
		rmfree(uh->uh_map, (long)npg, (long)a);
	splx(s);
	/* note: a is also nonzero on the free path, so the registers
	 * are invalidated on deallocation as well */
	if (a) {
		register int i, *m;

		m = (int *)&uh->uh_mr[a - 1];
		for (i = 0; i < npg; i++)
			*m++ = 0;	/* All off, especially 'valid' */
		/* track the high-water mark of unibus memory */
		i = addr + npg * 512;
		if (doalloc && i > uh->uh_lastmem)
			uh->uh_lastmem = i;
		else if (doalloc == 0 && i == uh->uh_lastmem)
			uh->uh_lastmem = addr;
#ifdef DW780
		/*
		 * On a 780, set up the map register disable
		 * field in the configuration register.  Beware
		 * of callers that request memory ``out of order''
		 * or in sections other than 8K multiples.
		 * Ubameminit handles such requests properly, however.
		 */
		if (uh->uh_type == DW780) {
			i = uh->uh_uba->uba_cr &~ 0x7c000000;
			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
			uh->uh_uba->uba_cr = i;
		}
#endif
	}
	return (a);
}
696 
697 #include "ik.h"
698 #include "vs.h"
699 #if NIK > 0 || NVS > 0
700 /*
701  * Map a virtual address into users address space. Actually all we
702  * do is turn on the user mode write protection bits for the particular
703  * page of memory involved.
704  */
705 maptouser(vaddress)
706 	caddr_t vaddress;
707 {
708 
709 	kvtopte(vaddress)->pg_prot = (PG_UW >> 27);
710 }
711 
712 unmaptouser(vaddress)
713 	caddr_t vaddress;
714 {
715 
716 	kvtopte(vaddress)->pg_prot = (PG_KW >> 27);
717 }
718 #endif
719