/* xref: /original-bsd/sys/hp300/hp300/trap.c (revision ba762ddc) */
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: trap.c 1.28 89/09/25$
13  *
14  *	@(#)trap.c	7.10 (Berkeley) 04/20/91
15  */
16 
17 #include "param.h"
18 #include "systm.h"
19 #include "proc.h"
20 #include "seg.h"
21 #include "acct.h"
22 #include "kernel.h"
23 #include "signalvar.h"
24 #include "resourcevar.h"
25 #include "syslog.h"
26 #include "user.h"
27 #ifdef KTRACE
28 #include "ktrace.h"
29 #endif
30 
31 #include "../include/trap.h"
32 #include "../include/cpu.h"
33 #include "../include/psl.h"
34 #include "../include/reg.h"
35 #include "../include/mtpr.h"
36 
37 #include "vm/vm.h"
38 #include "vm/pmap.h"
39 #include "vmmeter.h"
40 
41 #ifdef HPUXCOMPAT
42 #include "../hpux/hpux.h"
43 #endif
44 
#define	USER	040		/* user-mode flag added to type */

/*
 * System call dispatch table and entry count; referenced by syscall()
 * below.  These are declarations — the table itself is defined in the
 * machine-independent kernel.
 */
struct	sysent	sysent[];
int	nsysent;

/*
 * Printable names for processor trap types, indexed by the T_* code
 * (with the USER bit stripped); used only for the panic message in
 * trap()'s default case.
 */
char	*trap_type[] = {
	"Bus error",
	"Address error",
	"Illegal instruction",
	"Zero divide",
	"CHK instruction",
	"TRAPV instruction",
	"Privilege violation",
	"Trace trap",
	"MMU fault",
	"SSIR trap",
	"Format error",
	"68881 exception",
	"Coprocessor violation",
	"Async system trap"
};
#define	TRAP_TYPES	(sizeof trap_type / sizeof trap_type[0])

#ifdef DEBUG
/* Debug flag; not referenced in this file — presumably patched from a
 * kernel debugger.  TODO(review): confirm against the rest of hp300. */
int mmudebug = 0;
#endif
71 
72 /*
73  * Called from the trap handler when a processor trap occurs.
74  */
75 /*ARGSUSED*/
76 trap(type, code, v, frame)
77 	int type;
78 	unsigned code;
79 	register unsigned v;
80 	struct frame frame;
81 {
82 	register int i;
83 	unsigned ucode = 0;
84 	register struct proc *p = curproc;
85 	struct timeval syst;
86 	unsigned ncode;
87 
88 	cnt.v_trap++;
89 	syst = p->p_stime;
90 	if (USERMODE(frame.f_sr)) {
91 		type |= USER;
92 		p->p_regs = frame.f_regs;
93 	}
94 	switch (type) {
95 
96 	default:
97 dopanic:
98 		printf("trap type %d, code = %x, v = %x\n", type, code, v);
99 		regdump(frame.f_regs, 128);
100 		type &= ~USER;
101 		if ((unsigned)type < TRAP_TYPES)
102 			panic(trap_type[type]);
103 		panic("trap");
104 
105 	case T_BUSERR:		/* kernel bus error */
106 		if (!u.u_pcb.pcb_onfault)
107 			goto dopanic;
108 		/*
109 		 * If we have arranged to catch this fault in any of the
110 		 * copy to/from user space routines, set PC to return to
111 		 * indicated location and set flag informing buserror code
112 		 * that it may need to clean up stack frame.
113 		 */
114 copyfault:
115 		frame.f_pc = (int) u.u_pcb.pcb_onfault;
116 		frame.f_stackadj = -1;
117 		return;
118 
119 	case T_BUSERR+USER:	/* bus error */
120 	case T_ADDRERR+USER:	/* address error */
121 		i = SIGBUS;
122 		break;
123 
124 #ifdef FPCOPROC
125 	case T_COPERR:		/* kernel coprocessor violation */
126 #endif
127 	case T_FMTERR:		/* kernel format error */
128 	/*
129 	 * The user has most likely trashed the RTE or FP state info
130 	 * in the stack frame of a signal handler.
131 	 */
132 		type |= USER;
133 		printf("pid %d: kernel %s exception\n", p->p_pid,
134 		       type==T_COPERR ? "coprocessor" : "format");
135 		p->p_sigacts->ps_sigact[SIGILL] = SIG_DFL;
136 		i = sigmask(SIGILL);
137 		p->p_sigignore &= ~i;
138 		p->p_sigcatch &= ~i;
139 		p->p_sigmask &= ~i;
140 		i = SIGILL;
141 		ucode = frame.f_format;	/* XXX was ILL_RESAD_FAULT */
142 		break;
143 
144 #ifdef FPCOPROC
145 	case T_COPERR+USER:	/* user coprocessor violation */
146 	/* What is a proper response here? */
147 		ucode = 0;
148 		i = SIGFPE;
149 		break;
150 
151 	case T_FPERR+USER:		/* 68881 exceptions */
152 	/*
153 	 * We pass along the 68881 status register which locore stashed
154 	 * in code for us.  Note that there is a possibility that the
155 	 * bit pattern of this register will conflict with one of the
156 	 * FPE_* codes defined in signal.h.  Fortunately for us, the
157 	 * only such codes we use are all in the range 1-7 and the low
158 	 * 3 bits of the status register are defined as 0 so there is
159 	 * no clash.
160 	 */
161 		ucode = code;
162 		i = SIGFPE;
163 		break;
164 #endif
165 
166 	case T_ILLINST+USER:	/* illegal instruction fault */
167 #ifdef HPUXCOMPAT
168 		if (p->p_flag & SHPUX) {
169 			ucode = HPUX_ILL_ILLINST_TRAP;
170 			i = SIGILL;
171 			break;
172 		}
173 		/* fall through */
174 #endif
175 	case T_PRIVINST+USER:	/* privileged instruction fault */
176 #ifdef HPUXCOMPAT
177 		if (p->p_flag & SHPUX)
178 			ucode = HPUX_ILL_PRIV_TRAP;
179 		else
180 #endif
181 		ucode = frame.f_format;	/* XXX was ILL_PRIVIN_FAULT */
182 		i = SIGILL;
183 		break;
184 
185 	case T_ZERODIV+USER:	/* Divide by zero */
186 #ifdef HPUXCOMPAT
187 		if (p->p_flag & SHPUX)
188 			ucode = HPUX_FPE_INTDIV_TRAP;
189 		else
190 #endif
191 		ucode = frame.f_format;	/* XXX was FPE_INTDIV_TRAP */
192 		i = SIGFPE;
193 		break;
194 
195 	case T_CHKINST+USER:	/* CHK instruction trap */
196 #ifdef HPUXCOMPAT
197 		if (p->p_flag & SHPUX) {
198 			/* handled differently under hp-ux */
199 			i = SIGILL;
200 			ucode = HPUX_ILL_CHK_TRAP;
201 			break;
202 		}
203 #endif
204 		ucode = frame.f_format;	/* XXX was FPE_SUBRNG_TRAP */
205 		i = SIGFPE;
206 		break;
207 
208 	case T_TRAPVINST+USER:	/* TRAPV instruction trap */
209 #ifdef HPUXCOMPAT
210 		if (p->p_flag & SHPUX) {
211 			/* handled differently under hp-ux */
212 			i = SIGILL;
213 			ucode = HPUX_ILL_TRAPV_TRAP;
214 			break;
215 		}
216 #endif
217 		ucode = frame.f_format;	/* XXX was FPE_INTOVF_TRAP */
218 		i = SIGFPE;
219 		break;
220 
221 	/*
222 	 * XXX: Trace traps are a nightmare.
223 	 *
224 	 *	HP-UX uses trap #1 for breakpoints,
225 	 *	HPBSD uses trap #2,
226 	 *	SUN 3.x uses trap #15,
227 	 *	KGDB uses trap #15 (for kernel breakpoints; handled elsewhere).
228 	 *
229 	 * HPBSD and HP-UX traps both get mapped by locore.s into T_TRACE.
230 	 * SUN 3.x traps get passed through as T_TRAP15 and are not really
231 	 * supported yet.
232 	 */
233 	case T_TRACE:		/* kernel trace trap */
234 	case T_TRAP15:		/* SUN trace trap */
235 		frame.f_sr &= ~PSL_T;
236 		i = SIGTRAP;
237 		break;
238 
239 	case T_TRACE+USER:	/* user trace trap */
240 	case T_TRAP15+USER:	/* SUN user trace trap */
241 		frame.f_sr &= ~PSL_T;
242 		i = SIGTRAP;
243 		break;
244 
245 	case T_ASTFLT:		/* system async trap, cannot happen */
246 		goto dopanic;
247 
248 	case T_ASTFLT+USER:	/* user async trap */
249 		astoff();
250 		/*
251 		 * We check for software interrupts first.  This is because
252 		 * they are at a higher level than ASTs, and on a VAX would
253 		 * interrupt the AST.  We assume that if we are processing
254 		 * an AST that we must be at IPL0 so we don't bother to
255 		 * check.  Note that we ensure that we are at least at SIR
256 		 * IPL while processing the SIR.
257 		 */
258 		spl1();
259 		/* fall into... */
260 
261 	case T_SSIR:		/* software interrupt */
262 	case T_SSIR+USER:
263 		if (ssir & SIR_NET) {
264 			siroff(SIR_NET);
265 			cnt.v_soft++;
266 			netintr();
267 		}
268 		if (ssir & SIR_CLOCK) {
269 			siroff(SIR_CLOCK);
270 			cnt.v_soft++;
271 			softclock((caddr_t)frame.f_pc, (int)frame.f_sr);
272 		}
273 		/*
274 		 * If this was not an AST trap, we are all done.
275 		 */
276 		if (type != T_ASTFLT+USER) {
277 			cnt.v_trap--;
278 			return;
279 		}
280 		spl0();
281 #ifndef PROFTIMER
282 		if ((p->p_flag&SOWEUPC) && p->p_stats->p_prof.pr_scale) {
283 			addupc(frame.f_pc, &p->p_stats->p_prof, 1);
284 			p->p_flag &= ~SOWEUPC;
285 		}
286 #endif
287 		goto out;
288 
289 	case T_MMUFLT:		/* kernel mode page fault */
290 		/* fall into ... */
291 
292 	case T_MMUFLT+USER:	/* page fault */
293 	    {
294 		register vm_offset_t va;
295 		register struct vmspace *vm = p->p_vmspace;
296 		register vm_map_t map;
297 		int rv;
298 		vm_prot_t ftype;
299 		extern vm_map_t kernel_map;
300 		unsigned nss;
301 
302 		/*
303 		 * It is only a kernel address space fault iff:
304 		 * 	1. (type & USER) == 0  and
305 		 * 	2. pcb_onfault not set or
306 		 *	3. pcb_onfault set but supervisor space data fault
307 		 * The last can occur during an exec() copyin where the
308 		 * argument space is lazy-allocated.
309 		 */
310 		if (type == T_MMUFLT &&
311 		    (!u.u_pcb.pcb_onfault ||
312 		     (code & (SSW_DF|FC_SUPERD)) == (SSW_DF|FC_SUPERD)))
313 			map = kernel_map;
314 		else
315 			map = &vm->vm_map;
316 		if ((code & (SSW_DF|SSW_RW)) == SSW_DF)	/* what about RMW? */
317 			ftype = VM_PROT_READ | VM_PROT_WRITE;
318 		else
319 			ftype = VM_PROT_READ;
320 		va = trunc_page((vm_offset_t)v);
321 #ifdef DEBUG
322 		if (map == kernel_map && va == 0) {
323 			printf("trap: bad kernel access at %x\n", v);
324 			goto dopanic;
325 		}
326 #endif
327 		/*
328 		 * XXX: rude hack to make stack limits "work"
329 		 */
330 		nss = 0;
331 		if ((caddr_t)va >= vm->vm_maxsaddr && map != kernel_map) {
332 			nss = clrnd(btoc(USRSTACK-(unsigned)va));
333 			if (nss > btoc(p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
334 				rv = KERN_FAILURE;
335 				goto nogo;
336 			}
337 		}
338 		rv = vm_fault(map, va, ftype, FALSE);
339 		if (rv == KERN_SUCCESS) {
340 			/*
341 			 * XXX: continuation of rude stack hack
342 			 */
343 			if (nss > vm->vm_ssize)
344 				vm->vm_ssize = nss;
345 			if (type == T_MMUFLT)
346 				return;
347 			goto out;
348 		}
349 nogo:
350 		if (type == T_MMUFLT) {
351 			if (u.u_pcb.pcb_onfault)
352 				goto copyfault;
353 			printf("vm_fault(%x, %x, %x, 0) -> %x\n",
354 			       map, va, ftype, rv);
355 			printf("  type %x, code [mmu,,ssw]: %x\n",
356 			       type, code);
357 			goto dopanic;
358 		}
359 		ucode = v;
360 		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
361 		break;
362 	    }
363 	}
364 	trapsignal(p, i, ucode);
365 	if ((type & USER) == 0)
366 		return;
367 out:
368 	while (i = CURSIG(p))
369 		psig(i);
370 	p->p_pri = p->p_usrpri;
371 	if (want_resched) {
372 		/*
373 		 * Since we are curproc, clock will normally just change
374 		 * our priority without moving us from one queue to another
375 		 * (since the running process is not on a queue.)
376 		 * If that happened after we setrq ourselves but before we
377 		 * swtch()'ed, we might not be on the queue indicated by
378 		 * our priority.
379 		 */
380 		(void) splclock();
381 		setrq(p);
382 		p->p_stats->p_ru.ru_nivcsw++;
383 		swtch();
384 		while (i = CURSIG(p))
385 			psig(i);
386 	}
387 	if (p->p_stats->p_prof.pr_scale) {
388 		int ticks;
389 		struct timeval *tv = &p->p_stime;
390 
391 		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
392 			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
393 		if (ticks) {
394 #ifdef PROFTIMER
395 			extern int profscale;
396 			addupc(frame.f_pc, &p->p_stats->p_prof,
397 			    ticks * profscale);
398 #else
399 			addupc(frame.f_pc, &p->p_stats->p_prof, ticks);
400 #endif
401 		}
402 	}
403 	curpri = p->p_pri;
404 }
405 
406 /*
407  * Called from the trap handler when a system call occurs
408  */
409 /*ARGSUSED*/
410 syscall(code, frame)
411 	volatile int code;
412 	struct frame frame;
413 {
414 	register caddr_t params;
415 	register int i;
416 	register struct sysent *callp;
417 	register struct proc *p = curproc;
418 	int error, opc, numsys;
419 	struct args {
420 		int i[8];
421 	} args;
422 	int rval[2];
423 	struct timeval syst;
424 	struct sysent *systab;
425 #ifdef HPUXCOMPAT
426 	extern struct sysent hpuxsysent[];
427 	extern int hpuxnsysent, notimp();
428 #endif
429 
430 	cnt.v_syscall++;
431 	syst = p->p_stime;
432 	if (!USERMODE(frame.f_sr))
433 		panic("syscall");
434 	p->p_regs = frame.f_regs;
435 	opc = frame.f_pc - 2;
436 	systab = sysent;
437 	numsys = nsysent;
438 #ifdef HPUXCOMPAT
439 	if (p->p_flag & SHPUX) {
440 		systab = hpuxsysent;
441 		numsys = hpuxnsysent;
442 	}
443 #endif
444 	params = (caddr_t)frame.f_regs[SP] + NBPW;
445 	if (code == 0) {			/* indir */
446 		code = fuword(params);
447 		params += NBPW;
448 	}
449 	if (code >= numsys)
450 		callp = &systab[0];		/* indir (illegal) */
451 	else
452 		callp = &systab[code];
453 	if ((i = callp->sy_narg * sizeof (int)) &&
454 	    (error = copyin(params, (caddr_t)&args, (u_int)i))) {
455 #ifdef HPUXCOMPAT
456 		if (p->p_flag & SHPUX)
457 			error = bsdtohpuxerrno(error);
458 #endif
459 		frame.f_regs[D0] = (u_char) error;
460 		frame.f_sr |= PSL_C;	/* carry bit */
461 #ifdef KTRACE
462 		if (KTRPOINT(p, KTR_SYSCALL))
463 			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
464 #endif
465 		goto done;
466 	}
467 #ifdef KTRACE
468 	if (KTRPOINT(p, KTR_SYSCALL))
469 		ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
470 #endif
471 	rval[0] = 0;
472 	rval[1] = frame.f_regs[D1];
473 #ifdef HPUXCOMPAT
474 	/* debug kludge */
475 	if (callp->sy_call == notimp)
476 		error = notimp(p, args.i, rval, code, callp->sy_narg);
477 	else
478 #endif
479 	error = (*callp->sy_call)(p, &args, rval);
480 	if (error == ERESTART)
481 		frame.f_pc = opc;
482 	else if (error != EJUSTRETURN) {
483 		if (error) {
484 #ifdef HPUXCOMPAT
485 			if (p->p_flag & SHPUX)
486 				error = bsdtohpuxerrno(error);
487 #endif
488 			frame.f_regs[D0] = (u_char) error;
489 			frame.f_sr |= PSL_C;	/* carry bit */
490 		} else {
491 			frame.f_regs[D0] = rval[0];
492 			frame.f_regs[D1] = rval[1];
493 			frame.f_sr &= ~PSL_C;
494 		}
495 	}
496 	/* else if (error == EJUSTRETURN) */
497 		/* nothing to do */
498 
499 done:
500 	/*
501 	 * Reinitialize proc pointer `p' as it may be different
502 	 * if this is a child returning from fork syscall.
503 	 */
504 	p = curproc;
505 	/*
506 	 * XXX the check for sigreturn ensures that we don't
507 	 * attempt to set up a call to a signal handler (sendsig) before
508 	 * we have cleaned up the stack from the last call (sigreturn).
509 	 * Allowing this seems to lock up the machine in certain scenarios.
510 	 * What should really be done is to clean up the signal handling
511 	 * so that this is not a problem.
512 	 */
513 #include "sys/syscall.h"
514 	if (code != SYS_sigreturn)
515 		while (i = CURSIG(p))
516 			psig(i);
517 	p->p_pri = p->p_usrpri;
518 	if (want_resched) {
519 		/*
520 		 * Since we are curproc, clock will normally just change
521 		 * our priority without moving us from one queue to another
522 		 * (since the running process is not on a queue.)
523 		 * If that happened after we setrq ourselves but before we
524 		 * swtch()'ed, we might not be on the queue indicated by
525 		 * our priority.
526 		 */
527 		(void) splclock();
528 		setrq(p);
529 		p->p_stats->p_ru.ru_nivcsw++;
530 		swtch();
531 		if (code != SYS_sigreturn)
532 			while (i = CURSIG(p))
533 				psig(i);
534 	}
535 	if (p->p_stats->p_prof.pr_scale) {
536 		int ticks;
537 		struct timeval *tv = &p->p_stime;
538 
539 		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
540 			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
541 		if (ticks) {
542 #ifdef PROFTIMER
543 			extern int profscale;
544 			addupc(frame.f_pc, &p->p_stats->p_prof,
545 			    ticks * profscale);
546 #else
547 			addupc(frame.f_pc, &p->p_stats->p_prof, ticks);
548 #endif
549 		}
550 	}
551 	curpri = p->p_pri;
552 #ifdef KTRACE
553 	if (KTRPOINT(p, KTR_SYSRET))
554 		ktrsysret(p->p_tracep, code, error, rval[0]);
555 #endif
556 }
557