/* $NetBSD: machdep.c,v 1.123 2002/03/31 00:11:13 matt Exp $	 */

/*
 * Copyright (c) 1994, 1998 Ludd, University of Luleå, Sweden.
 * Copyright (c) 1993 Adam Glass
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Changed for the VAX port (and for readability) /IC
 *
 * This code is derived from software contributed to Berkeley by the Systems
 * Programming Group of the University of Utah Computer Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: machdep.c 1.63 91/04/24
 *
 * @(#)machdep.c	7.16 (Berkeley) 6/3/91
 */

#include "opt_ddb.h"
#include "opt_compat_netbsd.h"
#include "opt_compat_ultrix.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/map.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/time.h>
#include <sys/signal.h>
#include <sys/kernel.h>
#include <sys/msgbuf.h>
#include <sys/buf.h>
#include <sys/mbuf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/exec.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/ptrace.h>

#include <dev/cons.h>

#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>

#include <machine/sid.h>
#include <machine/pte.h>
#include <machine/mtpr.h>
#include <machine/cpu.h>
#include <machine/macros.h>
#include <machine/nexus.h>
#include <machine/trap.h>
#include <machine/reg.h>
#include <machine/db_machdep.h>
#include <machine/scb.h>
#include <vax/vax/gencons.h>

#ifdef DDB
#include <ddb/db_sym.h>
#include <ddb/db_extern.h>
#endif

#include "smg.h"

extern int virtual_avail, virtual_end;
/*
 * We do these external declarations here, maybe they should be done
 * somewhere else...
 */
char		machine[] = MACHINE;		/* from <machine/param.h> */
char		machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
char		cpu_model[100];
caddr_t		msgbufaddr;
int		physmem;
int		*symtab_start;
int		*symtab_end;
int		symtab_nsyms;

#define	IOMAPSZ	100
static	struct map iomap[IOMAPSZ];

struct vm_map *exec_map = NULL;
struct vm_map *mb_map = NULL;
struct vm_map *phys_map = NULL;

#ifdef DEBUG
int iospace_inited = 0;
#endif

struct softintr_head softclock_head = { IPL_SOFTCLOCK };
struct softintr_head softnet_head = { IPL_SOFTNET };
struct softintr_head softserial_head = { IPL_SOFTSERIAL };

void
cpu_startup()
{
	caddr_t		v;
	int		base, residual, i, sz;
	vaddr_t		minaddr, maxaddr;
	vsize_t		size;
	extern unsigned int avail_end;
	char pbuf[9];

	/*
	 * Initialize error message buffer.
	 */
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Good {morning,afternoon,evening,night}.
	 * Also call CPU init on systems that need that.
	 */
	printf("%s\n%s\n", version, cpu_model);
	if (dep_call->cpu_conf)
		(*dep_call->cpu_conf)();

	format_bytes(pbuf, sizeof(pbuf), avail_end);
	printf("total memory = %s\n", pbuf);
	panicstr = NULL;
	mtpr(AST_NO, PR_ASTLVL);
	spl0();

	/*
	 * Find out how much space we need, allocate it, and then give
	 * everything true virtual addresses.
	 */

	sz = (int) allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v, NULL) - v != sz)
		panic("startup: table size inconsistency");
	/*
	 * Now allocate buffers proper.	 They are different from the above in
	 * that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;		/* # bytes for buffers */

	/* allocate VM for buffers... area is not managed by VM system */
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("cpu_startup: cannot allocate VM for buffers");

	minaddr = (vaddr_t) buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	/* now allocate RAM for buffers */
	for (i = 0 ; i < nbuf ; i++) {
		vaddr_t curbuf;
		vsize_t curbufsize;
		struct vm_page *pg;

		/*
		 * First <residual> buffers get (base+1) physical pages
		 * allocated for them.	The rest get (base) physical pages.
		 *
		 * The rest of each buffer occupies virtual space, but has no
		 * physical memory allocated for it.
		 */
		curbuf = (vaddr_t) buffers + i * MAXBSIZE;
		curbufsize = NBPG * (i < residual ? base + 1 : base);
		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: "
				    "not enough RAM for buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE);
			curbuf += NBPG;
			curbufsize -= NBPG;
		}
	}
	pmap_update(kernel_map->pmap);

	/*
	 * Allocate a submap for exec arguments.  This map effectively limits
	 * the number of processes exec'ing at any time.
	 * At most one process with the full length is allowed.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

#if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
	/*
	 * Allocate a submap for physio.  This map effectively limits the
	 * number of processes doing physio at any one time.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, FALSE, NULL);
#endif

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */

	bufinit();
#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif
}
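
/*
 * Illustration of the buffer-cache page distribution above: with
 * hypothetical values bufpages = 10 and nbuf = 4, base = 2 and
 * residual = 2, so buffers 0 and 1 get three pages each and buffers
 * 2 and 3 get two.  A minimal sketch, not compiled into the kernel:
 */
#if 0
static void
bufpage_example(void)
{
	int bufpages = 10, nbuf = 4, i;		/* hypothetical values */
	int base = bufpages / nbuf;		/* 2 */
	int residual = bufpages % nbuf;		/* 2 */

	/* same rule as the allocation loop in cpu_startup() */
	for (i = 0; i < nbuf; i++)
		printf("buffer %d: %d pages\n", i,
		    i < residual ? base + 1 : base);
}
#endif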

u_int32_t dumpmag = 0x8fca0101;
int	dumpsize = 0;
long	dumplo = 0;

void
cpu_dumpconf()
{
	int		nblks;

	/*
	 * XXX include the final RAM page which is not included in physmem.
	 */
	dumpsize = physmem + 1;
	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
		nblks = (*bdevsw[major(dumpdev)].d_psize) (dumpdev);
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo == 0)
			dumplo = nblks - btodb(ctob(dumpsize));
	}
	/*
	 * Don't dump on the first NBPG (why NBPG?) in case the dump
	 * device includes a disk label.
	 */
	if (dumplo < btodb(NBPG))
		dumplo = btodb(NBPG);
}
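
/*
 * A sketch of the unit conversions above (not compiled): dbtob()/btodb()
 * convert between disk blocks and bytes, and ctob()/btoc() between bytes
 * and pages ("clicks").  So the largest dump that fits after the dumplo
 * offset is btoc(dbtob(nblks - dumplo)) pages, and a dump of dumpsize
 * pages occupies btodb(ctob(dumpsize)) disk blocks.
 */
#if 0
	int fit_pages = btoc(dbtob(nblks - dumplo));	/* device capacity */
	long dump_blks = btodb(ctob(dumpsize));		/* dump footprint */
#endif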

int
cpu_sysctl(a, b, c, d, e, f, g)
	int	*a;
	u_int	b;
	void	*c, *e;
	size_t	*d, f;
	struct	proc *g;
{
	return (EOPNOTSUPP);
}

void
setstatclockrate(hzrate)
	int hzrate;
{
}

void
consinit()
{
	/*
	 * Init I/O memory resource map. Must be done before cninit()
	 * is called; we may want to use iospace in the console routines.
	 */
	rminit(iomap, IOSPSZ, (long)1, "iomap", IOMAPSZ);
#ifdef DEBUG
	iospace_inited = 1;
#endif
	cninit();
#if defined(DDB)
	if (symtab_start != NULL && symtab_nsyms != 0 && symtab_end != NULL) {
		ddb_init(symtab_nsyms, symtab_start, symtab_end);
#ifndef __ELF__
	} else {
		extern int end; /* Contains pointer to symsize also */
		extern paddr_t esym;
		ddb_init(*(int *)&end, ((int *)&end) + 1, (void *)esym);
#endif
	}
#ifdef DEBUG
	if (sizeof(struct user) > REDZONEADDR)
		panic("struct user inside red zone");
#endif
#endif
}
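
/*
 * Hypothetical sketch (not compiled) of the old-style resource map API
 * from <sys/map.h> used for iomap above: rminit() seeds the map with
 * IOSPSZ units starting at 1, rmalloc() claims a run of units, and
 * rmfree() returns it.  Units here are VAX pages within iospace; see
 * vax_map_physmem() below for the real consumer.
 */
#if 0
	long pg = rmalloc(iomap, 4);	/* claim four I/O pages */
	if (pg != 0)
		rmfree(iomap, 4, pg);	/* and release them again */
#endif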

#if defined(COMPAT_13) || defined(COMPAT_ULTRIX)
int
compat_13_sys_sigreturn(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct compat_13_sys_sigreturn_args /* {
		syscallarg(struct sigcontext13 *) sigcntxp;
	} */ *uap = v;
	struct trapframe *scf;
	struct sigcontext13 *cntx;
	sigset_t mask;

	scf = p->p_addr->u_pcb.framep;
	cntx = SCARG(uap, sigcntxp);
	if (uvm_useracc((caddr_t)cntx, sizeof (*cntx), B_READ) == 0)
		return EINVAL;

	/*
	 * Sanity check the PSL: no raised IPL or interrupt stack, user
	 * mode in both current and previous mode fields, no compat mode.
	 */
	if ((cntx->sc_ps & (PSL_IPL | PSL_IS)) ||
	    ((cntx->sc_ps & (PSL_U | PSL_PREVU)) != (PSL_U | PSL_PREVU)) ||
	    (cntx->sc_ps & PSL_CM)) {
		return (EINVAL);
	}
	if (cntx->sc_onstack & SS_ONSTACK)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;

	native_sigset13_to_sigset(&cntx->sc_mask, &mask);
	(void) sigprocmask1(p, SIG_SETMASK, &mask, 0);

	scf->fp = cntx->sc_fp;
	scf->ap = cntx->sc_ap;
	scf->pc = cntx->sc_pc;
	scf->sp = cntx->sc_sp;
	scf->psl = cntx->sc_ps;
	return (EJUSTRETURN);
}
#endif

int
sys___sigreturn14(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys___sigreturn14_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct trapframe *scf;
	struct sigcontext *cntx;

	scf = p->p_addr->u_pcb.framep;
	cntx = SCARG(uap, sigcntxp);

	if (uvm_useracc((caddr_t)cntx, sizeof (*cntx), B_READ) == 0)
		return EINVAL;
	/*
	 * Sanity check the PSL: no raised IPL or interrupt stack, user
	 * mode in both current and previous mode fields, no compat mode.
	 */
	if ((cntx->sc_ps & (PSL_IPL | PSL_IS)) ||
	    ((cntx->sc_ps & (PSL_U | PSL_PREVU)) != (PSL_U | PSL_PREVU)) ||
	    (cntx->sc_ps & PSL_CM)) {
		return (EINVAL);
	}
	if (cntx->sc_onstack & SS_ONSTACK)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;
	/* Restore signal mask. */
	(void) sigprocmask1(p, SIG_SETMASK, &cntx->sc_mask, 0);

	scf->fp = cntx->sc_fp;
	scf->ap = cntx->sc_ap;
	scf->pc = cntx->sc_pc;
	scf->sp = cntx->sc_sp;
	scf->psl = cntx->sc_ps;
	return (EJUSTRETURN);
}

struct trampframe {
	unsigned	sig;	/* Signal number */
	unsigned	code;	/* Info code */
	unsigned	scp;	/* Pointer to struct sigcontext */
	unsigned	r0, r1, r2, r3, r4, r5; /* Registers saved when
						 * the signal was taken */
	unsigned	pc;	/* Address of signal handler */
	unsigned	arg;	/* Pointer to first (and only) sigreturn
				 * argument */
};

void
sendsig(catcher, sig, mask, code)
	sig_t		catcher;
	int		sig;
	sigset_t	*mask;
	u_long		code;
{
	struct	proc	*p = curproc;
	struct	trapframe *syscf;
	struct	sigcontext *sigctx, gsigctx;
	struct	trampframe *trampf, gtrampf;
	unsigned	cursp;
	int	onstack;

	syscf = p->p_addr->u_pcb.framep;

	onstack =
	    (p->p_sigctx.ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		cursp = ((int)p->p_sigctx.ps_sigstk.ss_sp +
		    p->p_sigctx.ps_sigstk.ss_size);
	else
		cursp = syscf->sp;

	/* Set up positions for structs on stack */
	sigctx = (struct sigcontext *) (cursp - sizeof(struct sigcontext));
	trampf = (struct trampframe *) ((unsigned)sigctx -
	    sizeof(struct trampframe));

	/* Place for pointer to arg list in sigreturn */
	cursp = (unsigned)sigctx - 8;

	gtrampf.arg = (int) sigctx;
	gtrampf.pc = (unsigned) catcher;
	/* r0..r5 are saved by the popr in the sigcode snippet */
	gtrampf.scp = (int) sigctx;
	gtrampf.code = code;
	gtrampf.sig = sig;

	gsigctx.sc_pc = syscf->pc;
	gsigctx.sc_ps = syscf->psl;
	gsigctx.sc_ap = syscf->ap;
	gsigctx.sc_fp = syscf->fp;
	gsigctx.sc_sp = syscf->sp;
	gsigctx.sc_onstack = p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK;
	gsigctx.sc_mask = *mask;

#if defined(COMPAT_13) || defined(COMPAT_ULTRIX)
	native_sigset_to_sigset13(mask, &gsigctx.__sc_mask13);
#endif

	if (copyout(&gtrampf, trampf, sizeof(gtrampf)) ||
	    copyout(&gsigctx, sigctx, sizeof(gsigctx)))
		sigexit(p, SIGILL);

	syscf->pc = (int)p->p_sigctx.ps_sigcode;
	syscf->psl = PSL_U | PSL_PREVU;
	syscf->ap = cursp;
	syscf->sp = cursp;

	if (onstack)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
}
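
/*
 * Resulting user stack after sendsig(), from low to high addresses (a
 * sketch derived from the code above; the sigcode trampoline pops r0-r5
 * and calls the handler, which returns into sigreturn with sigctx as
 * its only argument):
 *
 *	cursp  -> two longwords reserved for the sigreturn argument list
 *	trampf -> struct trampframe { sig, code, scp, r0-r5, pc, arg }
 *	sigctx -> struct sigcontext { pc, ps, ap, fp, sp, onstack, mask }
 *	          previous stack top (or the top of the signal stack)
 */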

int	waittime = -1;
static	volatile int showto; /* Must be volatile to survive MM on -> MM off */

void
cpu_reboot(howto, b)
	register int howto;
	char *b;
{
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr will be out of
		 * synch; adjust it now.
		 */
		resettodr();
	}
	splhigh();		/* extreme priority */
	if (howto & RB_HALT) {
		doshutdownhooks();
		if (dep_call->cpu_halt)
			(*dep_call->cpu_halt) ();
		printf("halting (in tight loop); hit\n\t^P\n\tHALT\n\n");
		for (;;)
			;
	} else {
		showto = howto;
#ifdef notyet
		/*
		 * If we are provided with a bootstring, parse it and send
		 * it to the boot program.
		 */
		if (b)
			while (*b) {
				showto |= (*b == 'a' ? RB_ASKBOOT : (*b == 'd' ?
				    RB_DEBUG : (*b == 's' ? RB_SINGLE : 0)));
				b++;
			}
#endif
		/*
		 * Now it's time to:
		 *  0. Save some registers that are needed in the new world.
		 *  1. Change stack to somewhere that will survive MM off.
		 *     (The RPB page is a good place to save things in.)
		 *  2. Actually turn MM off.
		 *  3. Dump away memory to disk, if asked.
		 *  4. Reboot as asked.
		 * The RPB page is _always_ the first page in memory; we
		 * can rely on that.
		 */
#ifdef notyet
		asm("	movl	%sp, (0x80000200)
			movl	0x80000200, %sp
			mfpr	$0x10, -(%sp)	# PR_PCBB
			mfpr	$0x11, -(%sp)	# PR_SCBB
			mfpr	$0xc, -(%sp)	# PR_SBR
			mfpr	$0xd, -(%sp)	# PR_SLR
			mtpr	$0, $0x38	# PR_MAPEN
		");
#endif

		if (showto & RB_DUMP)
			dumpsys();
		if (dep_call->cpu_reboot)
			(*dep_call->cpu_reboot)(showto);

		/* CPUs that don't handle reboots get the standard reboot. */
		while ((mfpr(PR_TXCS) & GC_RDY) == 0)
			;

		mtpr(GC_CONS|GC_BTFL, PR_TXDB);
	}
	asm("movl %0, %%r5":: "g" (showto)); /* How to boot */
	asm("movl %0, %%r11":: "r"(showto)); /* ??? */
	asm("halt");
	panic("Halt sket sej");	/* Swedish: "the halt went wrong" */
}

void
dumpsys()
{

	if (dumpdev == NODEV)
		return;
	/*
	 * For dumps during autoconfiguration: if the dump device is
	 * already configured but the dump parameters have not been set
	 * up yet, do that now.
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);
	printf("dump ");
	switch ((*bdevsw[major(dumpdev)].d_dump) (dumpdev, 0, 0, 0)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	default:
		printf("succeeded\n");
		break;
	}
}

int
process_read_regs(p, regs)
	struct proc    *p;
	struct reg     *regs;
{
	struct trapframe *tf = p->p_addr->u_pcb.framep;

	bcopy(&tf->r0, &regs->r0, 12 * sizeof(int));
	regs->ap = tf->ap;
	regs->fp = tf->fp;
	regs->sp = tf->sp;
	regs->pc = tf->pc;
	regs->psl = tf->psl;
	return 0;
}

int
process_write_regs(p, regs)
	struct proc    *p;
	struct reg     *regs;
{
	struct trapframe *tf = p->p_addr->u_pcb.framep;

	bcopy(&regs->r0, &tf->r0, 12 * sizeof(int));
	tf->ap = regs->ap;
	tf->fp = regs->fp;
	tf->sp = regs->sp;
	tf->pc = regs->pc;
	tf->psl = (regs->psl|PSL_U|PSL_PREVU) &
	    ~(PSL_MBZ|PSL_IS|PSL_IPL1F|PSL_CM); /* Allow compat mode? */
	return 0;
}

int
process_set_pc(p, addr)
	struct	proc *p;
	caddr_t addr;
{
	struct	trapframe *tf;
	void	*ptr;

	if ((p->p_flag & P_INMEM) == 0)
		return (EIO);

	ptr = (char *) p->p_addr->u_pcb.framep;
	tf = ptr;

	tf->pc = (unsigned) addr;

	return (0);
}

int
process_sstep(p, sstep)
	struct proc    *p;
	int	       sstep;
{
	void	       *ptr;
	struct trapframe *tf;

	if ((p->p_flag & P_INMEM) == 0)
		return (EIO);

	ptr = p->p_addr->u_pcb.framep;
	tf = ptr;

	if (sstep)
		tf->psl |= PSL_T;
	else
		tf->psl &= ~PSL_T;

	return (0);
}
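
/*
 * The process_* routines above are the machine-dependent backends for
 * ptrace(2): PT_GETREGS/PT_SETREGS go through process_read_regs() and
 * process_write_regs(), PT_CONTINUE uses process_set_pc(), and PT_STEP
 * uses process_sstep() to set or clear the PSL trace bit (PSL_T),
 * which makes the CPU trap after each instruction.
 */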

#undef PHYSMEMDEBUG
/*
 * Allocates a virtual range suitable for mapping in physical memory.
 * This differs from the bus_space routines in that it allocates on
 * physical page sizes instead of logical sizes. This implementation
 * uses resource maps when allocating space, which is allocated from
 * the IOMAP submap. The implementation is similar to the uba resource
 * map handling. Size is given in pages.
 * If the range requested is bigger than a logical page, space is
 * allocated from the kernel map instead.
 *
 * It is known that the first page in the iospace area is unused; it may
 * be used by console device drivers (before the map system is inited).
 */
vaddr_t
vax_map_physmem(phys, size)
	paddr_t phys;
	int size;
{
	extern vaddr_t iospace;
	vaddr_t addr;
	int pageno;
	static int warned = 0;

#ifdef DEBUG
	if (!iospace_inited)
		panic("vax_map_physmem: called before rminit()?!?");
#endif
	if (size >= LTOHPN) {
		addr = uvm_km_valloc(kernel_map, size * VAX_NBPG);
		if (addr == 0)
			panic("vax_map_physmem: kernel map full");
	} else {
		pageno = rmalloc(iomap, size);
		if (pageno == 0) {
			if (warned++ == 0) /* Warn only once */
				printf("vax_map_physmem: iomap too small\n");
			return 0;
		}
		addr = iospace + (pageno * VAX_NBPG);
	}
	ioaccess(addr, phys, size);
#ifdef PHYSMEMDEBUG
	printf("vax_map_physmem: alloc'ed %d pages for paddr %lx, at %lx\n",
	    size, phys, addr);
#endif
	return addr | (phys & VAX_PGOFSET);
}

/*
 * Unmaps the previously mapped (addr, size) pair.
 */
void
vax_unmap_physmem(addr, size)
	vaddr_t addr;
	int size;
{
	extern vaddr_t iospace;
	int pageno = (addr - iospace) / VAX_NBPG;
#ifdef PHYSMEMDEBUG
	printf("vax_unmap_physmem: unmapping %d pages at addr %lx\n",
	    size, addr);
#endif
	iounaccess(addr, size);
	if (size >= LTOHPN)
		uvm_km_free(kernel_map, addr, size * VAX_NBPG);
	else
		rmfree(iomap, size, pageno);
}
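
/*
 * Hypothetical usage sketch for the pair above (not compiled): a driver
 * maps one VAX page of device registers, accesses them, and unmaps them
 * again.  The physical address is made up for the example.
 */
#if 0
	vaddr_t va = vax_map_physmem(0x20001000, 1);	/* example paddr */
	if (va != 0) {
		/* ... read/write the device registers at va ... */
		vax_unmap_physmem(va, 1);
	}
#endif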

void *
softintr_establish(int ipl, void (*func)(void *), void *arg)
{
	struct softintr_handler *sh;
	struct softintr_head *shd;

	switch (ipl) {
	case IPL_SOFTCLOCK: shd = &softclock_head; break;
	case IPL_SOFTNET: shd = &softnet_head; break;
	case IPL_SOFTSERIAL: shd = &softserial_head; break;
	default: panic("softintr_establish: unsupported soft IPL");
	}

	sh = malloc(sizeof(*sh), M_SOFTINTR, M_NOWAIT);
	if (sh == NULL)
		return NULL;

	LIST_INSERT_HEAD(&shd->shd_intrs, sh, sh_link);
	sh->sh_head = shd;
	sh->sh_pending = 0;
	sh->sh_func = func;
	sh->sh_arg = arg;

	return sh;
}

void
softintr_disestablish(void *arg)
{
	struct softintr_handler *sh = arg;
	LIST_REMOVE(sh, sh_link);
	free(sh, M_SOFTINTR);
}
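
/*
 * Hypothetical usage sketch (not compiled): register a soft interrupt
 * handler at IPL_SOFTNET and schedule it later; softintr_schedule()
 * marks the handler pending so it runs when the IPL drops.  mysoftfunc
 * and mysc are made-up names for the example.
 */
#if 0
	void *sih = softintr_establish(IPL_SOFTNET, mysoftfunc, mysc);
	if (sih != NULL)
		softintr_schedule(sih);
#endif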

#include <dev/bi/bivar.h>
/*
 * This should be somewhere else.
 */
void
bi_intr_establish(void *icookie, int vec, void (*func)(void *), void *arg,
	struct evcnt *ev)
{
	scb_vecalloc(vec, func, arg, SCB_ISTACK, ev);
}

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
/*
 * Called from locore.
 */
void	krnlock(void);
void	krnunlock(void);

void
krnlock()
{
	KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
}

void
krnunlock()
{
	KERNEL_UNLOCK();
}
#endif
806