/*
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uba.c	7.10 (Berkeley) 12/16/90
 */

#include "sys/param.h"
#include "sys/systm.h"
#include "sys/map.h"
#include "sys/buf.h"
#include "sys/vm.h"
#include "sys/user.h"
#include "sys/proc.h"
#include "sys/conf.h"
#include "sys/dkstat.h"
#include "sys/kernel.h"

#include "../include/pte.h"
#include "../include/cpu.h"
#include "../include/mtpr.h"
#include "../vax/nexus.h"
#include "ubareg.h"
#include "ubavar.h"

#ifdef DW780
char	ubasr_bits[] = UBASR_BITS;
#endif

#define	spluba	spl7		/* IPL 17 */

/*
 * Do a transfer on the device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 *
 * The onq argument must be zero iff the device is not on the
 * queue for this UBA.  If onq is set, the device must be at the
 * head of the queue.  In any case, if the transfer is started,
 * the device will be off the queue, and if not, it will be on.
 *
 * Drivers that allocate one BDP and hold it for some time should
 * set ud_keepbdp.  In this case um_bdp tells which BDP is allocated
 * to the controller, unless it is zero, indicating that the controller
 * does not now have a BDP.
 */
ubaqueue(ui, onq)
	register struct uba_device *ui;
	int onq;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register struct uba_driver *ud;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	ud = um->um_driver;
	s = spluba();
	/*
	 * Honor exclusive BDP use requests.
	 */
	if (ud->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	if (ud->ud_keepbdp) {
		/*
		 * First get just a BDP (though in fact it comes with
		 * one map register too).
		 */
		if (um->um_bdp == 0) {
			um->um_bdp = uballoc(um->um_ubanum,
				(caddr_t)0, 0, UBA_NEEDBDP|UBA_CANTWAIT);
			if (um->um_bdp == 0)
				goto rwait;
		}
		/* now share it with this transfer */
		um->um_ubinfo = ubasetup(um->um_ubanum,
			um->um_tab.b_actf->b_actf,
			um->um_bdp|UBA_HAVEBDP|UBA_CANTWAIT);
	} else
		um->um_ubinfo = ubasetup(um->um_ubanum,
			um->um_tab.b_actf->b_actf, UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (ud->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	if (onq)
		uh->uh_actf = ui->ui_forw;
	(*ud->ud_dgo)(um);
	return (1);
rwait:
	if (!onq) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}
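
/*
 * Example (hypothetical driver code, for illustration only; not part
 * of the original sources): a driver start routine merely offers the
 * device to ubaqueue().  On success ubaqueue() itself calls the
 * driver's ud_dgo routine with the mapping resources allocated:
 *
 *	xxstart(ui)
 *		register struct uba_device *ui;
 *	{
 *		if (ui->ui_mi->um_tab.b_actf->b_actf)
 *			(void) ubaqueue(ui, 0);
 *	}
 *
 * If ubaqueue() returns 0 the device is left on uh_actf and is retried
 * from ubarelse() as resources are freed.
 */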

ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	if (um->um_driver->ud_keepbdp)
		um->um_ubinfo &= ~BDPMASK;	/* keep BDP for misers */
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and set up UBA map registers, and a BDP if requested.
 * Flags say whether a bdp is needed and whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 * The return value encodes the map register plus page offset,
 * the bdp number, and the number of map registers.
 */
ubasetup(uban, bp, flags)
	int uban;
	register struct buf *bp;
	register int flags;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register struct pte *pte, *io;
	register int npf;
	int pfnum, temp;
	int reg, bdp;
	unsigned v;
	struct proc *rp;
	int a, o, ubinfo;

#ifdef DW730
	if (uh->uh_type == DW730)
		flags &= ~UBA_NEEDBDP;
#endif
#ifdef QBA
	if (uh->uh_type == QBA)
		flags &= ~UBA_NEEDBDP;
#endif
	o = (int)bp->b_un.b_addr & PGOFSET;
	npf = btoc(bp->b_bcount + o) + 1;
	if (npf > UBA_MAXNMR)
		panic("uba xfer too big");
	a = spluba();
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	if ((flags & UBA_NEED16) && reg + npf > 128) {
		/*
		 * Could hang around and try again (if we can ever succeed).
		 * Won't help any current device...
		 */
		rmfree(uh->uh_map, (long)npf, (long)reg);
		splx(a);
		return (0);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs((long)uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	ubinfo = UBAI_INFO(o, reg, npf, bdp);
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = kvtopte(bp->b_un.b_addr);
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else {
		rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
		v = btop(bp->b_un.b_addr);
		if (bp->b_flags & B_UAREA)
			pte = &rp->p_addr[v];
		else
			pte = vtopte(rp, v);
	}
	io = &uh->uh_mr[reg];
	while (--npf > 0) {
		pfnum = pte->pg_pfnum;
		if (pfnum == 0)
			panic("uba zero uentry");
		pte++;
		*(int *)io++ = pfnum | temp;
	}
	*(int *)io = 0;
	return (ubinfo);
}
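
/*
 * The packed ubinfo value returned above is decomposed with the UBAI_
 * macros, as ubarelse() does below.  A sketch of typical use, assuming
 * the UBAI_ macros and field layout defined in ubavar.h:
 *
 *	int info = ubasetup(uban, bp, UBA_NEEDBDP);
 *	int bdp = UBAI_BDP(info);	which buffered data path
 *	int nmr = UBAI_NMR(info);	how many map registers
 *	int mr = UBAI_MR(info);		first map register
 *
 * The low bits hold the byte offset into the first mapped page, so the
 * low-order part of info (see UBAI_ADDR in ubavar.h) is the UNIBUS
 * address to hand the device.
 */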

/*
 * Non-buffer setup interface... set up a dummy buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}
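
/*
 * Example (hypothetical, for illustration): mapping a private kernel
 * buffer in and out around a transfer, assuming UBAI_ADDR from ubavar.h:
 *
 *	int info;
 *
 *	info = uballoc(uban, (caddr_t)buf, sizeof (buf), 0);
 *	... hand UBAI_ADDR(info) to the device and run the transfer ...
 *	ubarelse(uban, &info);
 *
 * ubarelse() takes the info by reference and clears it, so a uba reset
 * that releases the resources first is detected (see below).
 */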

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register info is copied by value and *amr cleared under
 * spluba, since the space may be released asynchronously
 * by a uba reset on 11/780's.
 */
ubarelse(uban, amr)
	int uban;
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spluba();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	bdp = UBAI_BDP(mr);
	if (bdp) {
		switch (uh->uh_type) {
#ifdef DWBUA
		case DWBUA:
			BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
			break;
#endif
#ifdef DW780
		case DW780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#ifdef DW750
		case DW750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		default:
			break;
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)&uh->uh_bdpwant);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered,
	 * nor can the registers be freed twice.
	 * Unblock interrupts once this is done.
	 */
	npf = UBAI_NMR(mr);
	reg = UBAI_MR(mr) + 1;
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)&uh->uh_mrwant);
	}
	while (uh->uh_actf && ubaqueue(uh->uh_actf, 1))
		;
}

ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = UBAI_BDP(um->um_ubinfo);

	switch (uh->uh_type) {
#ifdef DWBUA
	case DWBUA:
		BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
		break;
#endif
#ifdef DW780
	case DW780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#ifdef DW750
	case DW750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	default:
		break;
	}
}
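
/*
 * A note on use: ubadone() masks the BDP out of um_ubinfo for
 * ud_keepbdp drivers before calling ubarelse(), so ubarelse() never
 * purges a kept BDP.  Such a driver is expected to flush the path
 * itself when each transfer completes, typically from its interrupt
 * routine (sketch, hypothetical driver code):
 *
 *	ubapurge(um);
 *	ubadone(um);
 */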

ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	if (uhp->uh_memsize > UBA_MAXMR)
		uhp->uh_memsize = UBA_MAXMR;
	rminit(uhp->uh_map, (long)uhp->uh_memsize, (long)1, "uba", UAMSIZ);
	switch (uhp->uh_type) {
#ifdef DWBUA
	case DWBUA:
		uhp->uh_bdpfree = (1<<NBDPBUA) - 1;
		break;
#endif
#ifdef DW780
	case DW780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#ifdef DW750
	case DW750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
	default:
		break;
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spluba();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	ubameminit(uban);
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
	ifubareset(uban);
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are; they will never happen anyway.
 * SHOULD GET POINTER TO UBA_HD INSTEAD OF UBA.
 */
ubainit(uba)
	register struct uba_regs *uba;
{
	register struct uba_hd *uhp;
#ifdef QBA
	int isphys = 0;
#endif

	for (uhp = uba_hd; uhp < uba_hd + numuba; uhp++) {
		if (uhp->uh_uba == uba)
			break;
		if (uhp->uh_physuba == uba) {
#ifdef QBA
			isphys++;
#endif
			break;
		}
	}
	if (uhp >= uba_hd + numuba) {
		printf("init unknown uba\n");
		return;
	}

	switch (uhp->uh_type) {
#ifdef DWBUA
	case DWBUA:
		BUA(uba)->bua_csr |= BUACSR_UPI;
		/* give devices time to recover from power fail */
		DELAY(500000);
		break;
#endif
#ifdef DW780
	case DW780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#ifdef DW750
	case DW750:
#endif
#ifdef DW730
	case DW730:
#endif
#ifdef QBA
	case QBA:
#endif
#if DW750 || DW730 || QBA
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
#ifdef QBA
		/*
		 * Re-enable local memory access
		 * from the Q-bus.
		 */
		if (uhp->uh_type == QBA) {
			if (isphys)
				*((char *)QIOPAGE630 + QIPCR) = Q_LMEAE;
			else
				*(uhp->uh_iopage + QIPCR) = Q_LMEAE;
		}
#endif QBA
		break;
#endif DW750 || DW730 || QBA
	}
}

#ifdef QBA
/*
 * Determine the interrupt priority of a Q-bus
 * peripheral.  The device probe routine must spl6(),
 * attempt to make the device request an interrupt,
 * delaying as necessary, then call this routine
 * before resetting the device.
 */
qbgetpri()
{
	int pri;
	extern int cvec;

	for (pri = 0x17; pri > 0x14; ) {
		if (cvec && cvec != 0x200)	/* interrupted at pri */
			break;
		pri--;
		splx(pri - 1);
	}
	(void) spl0();
	return (pri);
}
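
/*
 * A sketch of the expected calling sequence (hypothetical device and
 * register names, for illustration only):
 *
 *	xxprobe(reg)
 *		caddr_t reg;
 *	{
 *		register int br, cvec;		value-result
 *		struct xxdevice *addr = (struct xxdevice *)reg;
 *
 *		(void) spl6();
 *		addr->xx_csr = XX_IE;		coax an interrupt
 *		DELAY(10000);
 *		br = qbgetpri();
 *		addr->xx_csr = 0;		reset the device
 *		...
 *	}
 */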
#endif

#ifdef DW780
int	ubawedgecnt = 10;
int	ubacrazy = 500;
int	zvcnt_max = 5000;	/* in 8 sec */
/*
 * This routine is called by the locore code to process a UBA
 * error on an 11/780 or 8600.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, ipl, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int ipl, uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		/*
		 * Declare dt as unsigned so that negative values
		 * are handled as >8 below, in case time was set back.
		 */
		u_long	dt = time.tv_sec - uh->uh_zvtime;

		uh->uh_zvtotal++;
		if (dt > 8) {
			uh->uh_zvtime = time.tv_sec;
			uh->uh_zvcnt = 0;
		}
		if (++uh->uh_zvcnt > zvcnt_max) {
			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
				uban, uh->uh_zvcnt, dt + 1);
			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
				uba->uba_cnfgr&0xff);
			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
				uba->uba_sr, ubasr_bits, uba->uba_dcr,
				(uba->uba_dcr&0x8000000)?"":"NOT ");
			ubareset(uban);
		}
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spluba();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif

/*
 * Look for devices with unibus memory, allow them to configure, then disable
 * map registers as necessary.  Called during autoconfiguration and ubareset.
 * The device ubamem routine returns 0 on success, 1 on success if it is fully
 * configured (has no csr or interrupt, so doesn't need to be probed),
 * and -1 on failure.
 */
ubameminit(uban)
	int uban;
{
	register struct uba_device *ui;
	register struct uba_hd *uh = &uba_hd[uban];
	caddr_t umembase = umem[uban] + 0x3e000, addr;
#define	ubaoff(off)	((int)(off) & 0x1fff)

	uh->uh_lastmem = 0;
	for (ui = ubdinit; ui->ui_driver; ui++) {
		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
			continue;
		if (ui->ui_driver->ud_ubamem) {
			/*
			 * During autoconfiguration, need to fudge ui_addr.
			 */
			addr = ui->ui_addr;
			ui->ui_addr = umembase + ubaoff(addr);
			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
			case 1:
				ui->ui_alive = 1;
				/* FALLTHROUGH */
			case 0:
				ui->ui_ubanum = uban;
				break;
			}
			ui->ui_addr = addr;
		}
	}
#ifdef DW780
	/*
	 * On a DW780, throw away any map registers disabled by rounding
	 * the map disable in the configuration register
	 * up to the next 8K boundary, or below the last unibus memory.
	 */
	if (uh->uh_type == DW780) {
		register i;

		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		while (i)
			(void) rmget(uh->uh_map, 1, i--);
	}
#endif
}
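
/*
 * A hypothetical ud_ubamem routine, to illustrate the 0/1/-1 protocol
 * described above (device names and sizes invented):
 *
 *	xxubamem(ui, uban)
 *		register struct uba_device *ui;
 *		int uban;
 *	{
 *		if (badaddr(ui->ui_addr, 2))
 *			return (-1);		no memory responds there
 *		if (ubamem(uban, XXMEMBASE, XXMEMPAGES, 1) == 0)
 *			return (-1);		map registers in use
 *		return (0);			csr still to be probed
 *	}
 */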

/*
 * Allocate UNIBUS memory.  Allocates and initializes
 * sufficient mapping registers for access.  On a 780,
 * the configuration register is set up to disable UBA
 * response on DMA transfers to addresses controlled
 * by the disabled mapping registers.
 * On a DW780, should only be called from ubameminit, or in ascending order
 * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
 * the last unibus memory would free unusable map registers.
 * Doalloc is 1 to allocate, 0 to deallocate.
 */
ubamem(uban, addr, npg, doalloc)
	int uban, addr, npg, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int a;
	int s;

	a = (addr >> 9) + 1;
	s = spluba();
	if (doalloc)
		a = rmget(uh->uh_map, npg, a);
	else
		rmfree(uh->uh_map, (long)npg, (long)a);
	splx(s);
	if (a) {
		register int i, *m;

		m = (int *)&uh->uh_mr[a - 1];
		for (i = 0; i < npg; i++)
			*m++ = 0;	/* All off, especially 'valid' */
		i = addr + npg * 512;
		if (doalloc && i > uh->uh_lastmem)
			uh->uh_lastmem = i;
		else if (doalloc == 0 && i == uh->uh_lastmem)
			uh->uh_lastmem = addr;
#ifdef DW780
		/*
		 * On a 780, set up the map register disable
		 * field in the configuration register.  Beware
		 * of callers that request memory ``out of order''
		 * or in sections other than 8K multiples.
		 * Ubameminit handles such requests properly, however.
		 */
		if (uh->uh_type == DW780) {
			i = uh->uh_uba->uba_cr &~ 0x7c000000;
			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
			uh->uh_uba->uba_cr = i;
		}
#endif
	}
	return (a);
}
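
/*
 * Example (hypothetical device): 16K bytes of UNIBUS memory at octal
 * address 0740000 would be reserved during configuration by
 *
 *	if (ubamem(uban, 0740000, 16*1024/512, 1) == 0)
 *		return (-1);
 *
 * which zeroes (invalidates) the corresponding map registers so DMA
 * references to those UNIBUS addresses are not mapped to host memory.
 */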
686 
687 #include "ik.h"
688 #include "vs.h"
689 #if NIK > 0 || NVS > 0
690 /*
691  * Map a virtual address into users address space. Actually all we
692  * do is turn on the user mode write protection bits for the particular
693  * page of memory involved.
694  */
695 maptouser(vaddress)
696 	caddr_t vaddress;
697 {
698 
699 	kvtopte(vaddress)->pg_prot = (PG_UW >> 27);
700 }
701 
702 unmaptouser(vaddress)
703 	caddr_t vaddress;
704 {
705 
706 	kvtopte(vaddress)->pg_prot = (PG_KW >> 27);
707 }
708 #endif
709