xref: /original-bsd/sys/hp300/hp300/trap.c (revision fbcc2ded)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: trap.c 1.28 89/09/25$
13  *
14  *	@(#)trap.c	7.8 (Berkeley) 12/05/90
15  */
16 
17 #include "cpu.h"
18 #include "psl.h"
19 #include "reg.h"
20 #include "mtpr.h"
21 
22 #include "param.h"
23 #include "systm.h"
24 #include "user.h"
25 #include "proc.h"
26 #include "seg.h"
27 #include "trap.h"
28 #include "acct.h"
29 #include "kernel.h"
30 #include "syslog.h"
31 #ifdef KTRACE
32 #include "ktrace.h"
33 #endif
34 
35 #include "../vm/vm_param.h"
36 #include "../vm/pmap.h"
37 #include "../vm/vm_map.h"
38 #include "vmmeter.h"
39 
40 #ifdef HPUXCOMPAT
41 #include "../hpux/hpux.h"
42 #endif
43 
44 #define	USER	040		/* user-mode flag added to type */
45 
46 struct	sysent	sysent[];
47 int	nsysent;
48 
/*
 * Human-readable names for processor traps, indexed by the T_* trap
 * codes (trap.h).  Used only on the panic path in trap(), which masks
 * off USER and bounds the index with TRAP_TYPES before lookup.
 */
char	*trap_type[] = {
	"Bus error",
	"Address error",
	"Illegal instruction",
	"Zero divide",
	"CHK instruction",
	"TRAPV instruction",
	"Privilege violation",
	"Trace trap",
	"MMU fault",
	"SSIR trap",
	"Format error",
	"68881 exception",
	"Coprocessor violation",
	"Async system trap"
};
/* number of entries in trap_type[]; guards the panic-message lookup */
#define	TRAP_TYPES	(sizeof trap_type / sizeof trap_type[0])
66 
#ifdef DEBUG
/* patchable debug flag; not referenced in this file chunk — presumably
   examined from a debugger or elsewhere in the MMU fault path */
int mmudebug = 0;
#endif
70 
71 /*
72  * Called from the trap handler when a processor trap occurs.
73  */
74 /*ARGSUSED*/
75 trap(type, code, v, frame)
76 	int type;
77 	unsigned code;
78 	register unsigned v;
79 	struct frame frame;
80 {
81 	register int i;
82 	unsigned ucode = 0;
83 	register struct proc *p = u.u_procp;
84 	struct timeval syst;
85 	unsigned ncode;
86 
87 	cnt.v_trap++;
88 	syst = u.u_ru.ru_stime;
89 	if (USERMODE(frame.f_sr)) {
90 		type |= USER;
91 		u.u_ar0 = frame.f_regs;
92 	}
93 	switch (type) {
94 
95 	default:
96 dopanic:
97 #ifdef KGDB
98 		if (!panicstr && kgdb_trap(type, code, v, &frame))
99 			return;
100 #endif
101 		printf("trap type %d, code = %x, v = %x\n", type, code, v);
102 		regdump(frame.f_regs, 128);
103 		type &= ~USER;
104 		if ((unsigned)type < TRAP_TYPES)
105 			panic(trap_type[type]);
106 		panic("trap");
107 
108 	case T_BUSERR:		/* kernel bus error */
109 		if (!u.u_pcb.pcb_onfault)
110 			goto dopanic;
111 		/*
112 		 * If we have arranged to catch this fault in any of the
113 		 * copy to/from user space routines, set PC to return to
114 		 * indicated location and set flag informing buserror code
115 		 * that it may need to clean up stack frame.
116 		 */
117 copyfault:
118 		frame.f_pc = (int) u.u_pcb.pcb_onfault;
119 		frame.f_stackadj = -1;
120 		return;
121 
122 	case T_BUSERR+USER:	/* bus error */
123 	case T_ADDRERR+USER:	/* address error */
124 		i = SIGBUS;
125 		break;
126 
127 #ifdef FPCOPROC
128 	case T_COPERR:		/* kernel coprocessor violation */
129 #endif
130 	case T_FMTERR:		/* kernel format error */
131 	/*
132 	 * The user has most likely trashed the RTE or FP state info
133 	 * in the stack frame of a signal handler.
134 	 */
135 		type |= USER;
136 		printf("pid %d: kernel %s exception\n", u.u_procp->p_pid,
137 		       type==T_COPERR ? "coprocessor" : "format");
138 		u.u_signal[SIGILL] = SIG_DFL;
139 		i = sigmask(SIGILL);
140 		p->p_sigignore &= ~i;
141 		p->p_sigcatch &= ~i;
142 		p->p_sigmask &= ~i;
143 		i = SIGILL;
144 		ucode = frame.f_format;	/* XXX was ILL_RESAD_FAULT */
145 		break;
146 
147 #ifdef FPCOPROC
148 	case T_COPERR+USER:	/* user coprocessor violation */
149 	/* What is a proper response here? */
150 		ucode = 0;
151 		i = SIGFPE;
152 		break;
153 
154 	case T_FPERR+USER:		/* 68881 exceptions */
155 	/*
156 	 * We pass along the 68881 status register which locore stashed
157 	 * in code for us.  Note that there is a possibility that the
158 	 * bit pattern of this register will conflict with one of the
159 	 * FPE_* codes defined in signal.h.  Fortunately for us, the
160 	 * only such codes we use are all in the range 1-7 and the low
161 	 * 3 bits of the status register are defined as 0 so there is
162 	 * no clash.
163 	 */
164 		ucode = code;
165 		i = SIGFPE;
166 		break;
167 #endif
168 
169 	case T_ILLINST+USER:	/* illegal instruction fault */
170 #ifdef HPUXCOMPAT
171 		if (u.u_procp->p_flag & SHPUX) {
172 			ucode = HPUX_ILL_ILLINST_TRAP;
173 			i = SIGILL;
174 			break;
175 		}
176 		/* fall through */
177 #endif
178 	case T_PRIVINST+USER:	/* privileged instruction fault */
179 #ifdef HPUXCOMPAT
180 		if (u.u_procp->p_flag & SHPUX)
181 			ucode = HPUX_ILL_PRIV_TRAP;
182 		else
183 #endif
184 		ucode = frame.f_format;	/* XXX was ILL_PRIVIN_FAULT */
185 		i = SIGILL;
186 		break;
187 
188 	case T_ZERODIV+USER:	/* Divide by zero */
189 #ifdef HPUXCOMPAT
190 		if (u.u_procp->p_flag & SHPUX)
191 			ucode = HPUX_FPE_INTDIV_TRAP;
192 		else
193 #endif
194 		ucode = frame.f_format;	/* XXX was FPE_INTDIV_TRAP */
195 		i = SIGFPE;
196 		break;
197 
198 	case T_CHKINST+USER:	/* CHK instruction trap */
199 #ifdef HPUXCOMPAT
200 		if (u.u_procp->p_flag & SHPUX) {
201 			/* handled differently under hp-ux */
202 			i = SIGILL;
203 			ucode = HPUX_ILL_CHK_TRAP;
204 			break;
205 		}
206 #endif
207 		ucode = frame.f_format;	/* XXX was FPE_SUBRNG_TRAP */
208 		i = SIGFPE;
209 		break;
210 
211 	case T_TRAPVINST+USER:	/* TRAPV instruction trap */
212 #ifdef HPUXCOMPAT
213 		if (u.u_procp->p_flag & SHPUX) {
214 			/* handled differently under hp-ux */
215 			i = SIGILL;
216 			ucode = HPUX_ILL_TRAPV_TRAP;
217 			break;
218 		}
219 #endif
220 		ucode = frame.f_format;	/* XXX was FPE_INTOVF_TRAP */
221 		i = SIGFPE;
222 		break;
223 
224 	/*
225 	 * XXX: Trace traps are a nightmare.
226 	 *
227 	 *	HP-UX uses trap #1 for breakpoints,
228 	 *	HPBSD uses trap #2,
229 	 *	SUN 3.x uses trap #15,
230 	 *	KGDB uses trap #15 (for kernel breakpoints).
231 	 *
232 	 * HPBSD and HP-UX traps both get mapped by locore.s into T_TRACE.
233 	 * SUN 3.x traps get passed through as T_TRAP15 and are not really
234 	 * supported yet.  KGDB traps are also passed through as T_TRAP15
235 	 * and are not used yet.
236 	 */
237 	case T_TRACE:		/* kernel trace trap */
238 	case T_TRAP15:		/* SUN (or KGDB) kernel trace trap */
239 #ifdef KGDB
240 		if (kgdb_trap(type, code, v, &frame))
241 			return;
242 #endif
243 		frame.f_sr &= ~PSL_T;
244 		i = SIGTRAP;
245 		break;
246 
247 	case T_TRACE+USER:	/* user trace trap */
248 	case T_TRAP15+USER:	/* SUN user trace trap */
249 		frame.f_sr &= ~PSL_T;
250 		i = SIGTRAP;
251 		break;
252 
253 	case T_ASTFLT:		/* system async trap, cannot happen */
254 		goto dopanic;
255 
256 	case T_ASTFLT+USER:	/* user async trap */
257 		astoff();
258 		/*
259 		 * We check for software interrupts first.  This is because
260 		 * they are at a higher level than ASTs, and on a VAX would
261 		 * interrupt the AST.  We assume that if we are processing
262 		 * an AST that we must be at IPL0 so we don't bother to
263 		 * check.  Note that we ensure that we are at least at SIR
264 		 * IPL while processing the SIR.
265 		 */
266 		spl1();
267 		/* fall into... */
268 
269 	case T_SSIR:		/* software interrupt */
270 	case T_SSIR+USER:
271 		if (ssir & SIR_NET) {
272 			siroff(SIR_NET);
273 			cnt.v_soft++;
274 			netintr();
275 		}
276 		if (ssir & SIR_CLOCK) {
277 			siroff(SIR_CLOCK);
278 			cnt.v_soft++;
279 			softclock((caddr_t)frame.f_pc, (int)frame.f_sr);
280 		}
281 		/*
282 		 * If this was not an AST trap, we are all done.
283 		 */
284 		if (type != T_ASTFLT+USER) {
285 			cnt.v_trap--;
286 			return;
287 		}
288 		spl0();
289 #ifndef PROFTIMER
290 		if ((u.u_procp->p_flag&SOWEUPC) && u.u_prof.pr_scale) {
291 			addupc(frame.f_pc, &u.u_prof, 1);
292 			u.u_procp->p_flag &= ~SOWEUPC;
293 		}
294 #endif
295 		goto out;
296 
297 	case T_MMUFLT:		/* kernel mode page fault */
298 		/* fall into ... */
299 
300 	case T_MMUFLT+USER:	/* page fault */
301 	    {
302 		register vm_offset_t va;
303 		register vm_map_t map;
304 		int rv;
305 		vm_prot_t ftype;
306 		extern vm_map_t kernel_map;
307 		unsigned nss;
308 
309 		/*
310 		 * It is only a kernel address space fault iff:
311 		 * 	1. (type & USER) == 0  and
312 		 * 	2. pcb_onfault not set or
313 		 *	3. pcb_onfault set but supervisor space data fault
314 		 * The last can occur during an exec() copyin where the
315 		 * argument space is lazy-allocated.
316 		 */
317 		if (type == T_MMUFLT &&
318 		    (!u.u_pcb.pcb_onfault ||
319 		     (code & (SSW_DF|FC_SUPERD)) == (SSW_DF|FC_SUPERD)))
320 			map = kernel_map;
321 		else
322 			map = u.u_procp->p_map;
323 		if ((code & (SSW_DF|SSW_RW)) == SSW_DF)	/* what about RMW? */
324 			ftype = VM_PROT_READ | VM_PROT_WRITE;
325 		else
326 			ftype = VM_PROT_READ;
327 		va = trunc_page((vm_offset_t)v);
328 #ifdef DEBUG
329 		if (map == kernel_map && va == 0) {
330 			printf("trap: bad kernel access at %x\n", v);
331 			goto dopanic;
332 		}
333 #endif
334 		/*
335 		 * XXX: rude hack to make stack limits "work"
336 		 */
337 		nss = 0;
338 		if ((caddr_t)va >= u.u_maxsaddr && map != kernel_map) {
339 			nss = clrnd(btoc(USRSTACK-(unsigned)va));
340 			if (nss > btoc(u.u_rlimit[RLIMIT_STACK].rlim_cur)) {
341 				rv = KERN_FAILURE;
342 				goto nogo;
343 			}
344 		}
345 		rv = vm_fault(map, va, ftype, FALSE);
346 		if (rv == KERN_SUCCESS) {
347 			/*
348 			 * XXX: continuation of rude stack hack
349 			 */
350 			if (nss > u.u_ssize)
351 				u.u_ssize = nss;
352 			if (type == T_MMUFLT)
353 				return;
354 			goto out;
355 		}
356 nogo:
357 		if (type == T_MMUFLT) {
358 			if (u.u_pcb.pcb_onfault)
359 				goto copyfault;
360 			printf("vm_fault(%x, %x, %x, 0) -> %x\n",
361 			       map, va, ftype, rv);
362 			printf("  type %x, code [mmu,,ssw]: %x\n",
363 			       type, code);
364 			goto dopanic;
365 		}
366 		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
367 		break;
368 	    }
369 	}
370 	trapsignal(i, ucode);
371 	if ((type & USER) == 0)
372 		return;
373 out:
374 	p = u.u_procp;
375 	if (i = CURSIG(p))
376 		psig(i);
377 	p->p_pri = p->p_usrpri;
378 	if (runrun) {
379 		/*
380 		 * Since we are u.u_procp, clock will normally just change
381 		 * our priority without moving us from one queue to another
382 		 * (since the running process is not on a queue.)
383 		 * If that happened after we setrq ourselves but before we
384 		 * swtch()'ed, we might not be on the queue indicated by
385 		 * our priority.
386 		 */
387 		(void) splclock();
388 		setrq(p);
389 		u.u_ru.ru_nivcsw++;
390 		swtch();
391 		if (i = CURSIG(p))
392 			psig(i);
393 	}
394 	if (u.u_prof.pr_scale) {
395 		int ticks;
396 		struct timeval *tv = &u.u_ru.ru_stime;
397 
398 		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
399 			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
400 		if (ticks) {
401 #ifdef PROFTIMER
402 			extern int profscale;
403 			addupc(frame.f_pc, &u.u_prof, ticks * profscale);
404 #else
405 			addupc(frame.f_pc, &u.u_prof, ticks);
406 #endif
407 		}
408 	}
409 	curpri = p->p_pri;
410 }
411 
412 /*
413  * Called from the trap handler when a system call occurs
414  */
415 /*ARGSUSED*/
416 syscall(code, frame)
417 	volatile int code;
418 	struct frame frame;
419 {
420 	register caddr_t params;
421 	register int i;
422 	register struct sysent *callp;
423 	register struct proc *p = u.u_procp;
424 	int error, opc, numsys;
425 	struct args {
426 		int i[8];
427 	} args;
428 	int rval[2];
429 	struct timeval syst;
430 	struct sysent *systab;
431 #ifdef HPUXCOMPAT
432 	extern struct sysent hpuxsysent[];
433 	extern int hpuxnsysent, notimp();
434 #endif
435 
436 	cnt.v_syscall++;
437 	syst = u.u_ru.ru_stime;
438 	if (!USERMODE(frame.f_sr))
439 		panic("syscall");
440 	u.u_ar0 = frame.f_regs;
441 	opc = frame.f_pc - 2;
442 	systab = sysent;
443 	numsys = nsysent;
444 #ifdef HPUXCOMPAT
445 	if (p->p_flag & SHPUX) {
446 		systab = hpuxsysent;
447 		numsys = hpuxnsysent;
448 	}
449 #endif
450 	params = (caddr_t)frame.f_regs[SP] + NBPW;
451 	if (code == 0) {			/* indir */
452 		code = fuword(params);
453 		params += NBPW;
454 	}
455 	if (code >= numsys)
456 		callp = &systab[0];		/* indir (illegal) */
457 	else
458 		callp = &systab[code];
459 	if ((i = callp->sy_narg * sizeof (int)) &&
460 	    (error = copyin(params, (caddr_t)&args, (u_int)i))) {
461 #ifdef HPUXCOMPAT
462 		if (p->p_flag & SHPUX)
463 			error = bsdtohpuxerrno(error);
464 #endif
465 		frame.f_regs[D0] = (u_char) error;
466 		frame.f_sr |= PSL_C;	/* carry bit */
467 #ifdef KTRACE
468 		if (KTRPOINT(p, KTR_SYSCALL))
469 			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
470 #endif
471 		goto done;
472 	}
473 #ifdef KTRACE
474 	if (KTRPOINT(p, KTR_SYSCALL))
475 		ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
476 #endif
477 	rval[0] = 0;
478 	rval[1] = frame.f_regs[D1];
479 #ifdef HPUXCOMPAT
480 	/* debug kludge */
481 	if (callp->sy_call == notimp)
482 		error = notimp(u.u_procp, args.i, rval, code, callp->sy_narg);
483 	else
484 #endif
485 	error = (*callp->sy_call)(u.u_procp, &args, rval);
486 	if (error == ERESTART)
487 		frame.f_pc = opc;
488 	else if (error != EJUSTRETURN) {
489 		if (error) {
490 #ifdef HPUXCOMPAT
491 			if (p->p_flag & SHPUX)
492 				error = bsdtohpuxerrno(error);
493 #endif
494 			frame.f_regs[D0] = (u_char) error;
495 			frame.f_sr |= PSL_C;	/* carry bit */
496 		} else {
497 			frame.f_regs[D0] = rval[0];
498 			frame.f_regs[D1] = rval[1];
499 			frame.f_sr &= ~PSL_C;
500 		}
501 	}
502 	/* else if (error == EJUSTRETURN) */
503 		/* nothing to do */
504 
505 done:
506 	/*
507 	 * Reinitialize proc pointer `p' as it may be different
508 	 * if this is a child returning from fork syscall.
509 	 */
510 	p = u.u_procp;
511 	/*
512 	 * XXX the check for sigreturn ensures that we don't
513 	 * attempt to set up a call to a signal handler (sendsig) before
514 	 * we have cleaned up the stack from the last call (sigreturn).
515 	 * Allowing this seems to lock up the machine in certain scenarios.
516 	 * What should really be done is to clean up the signal handling
517 	 * so that this is not a problem.
518 	 */
519 #include "syscall.h"
520 	if (code != SYS_sigreturn && (i = CURSIG(p)))
521 		psig(i);
522 	p->p_pri = p->p_usrpri;
523 	if (runrun) {
524 		/*
525 		 * Since we are u.u_procp, clock will normally just change
526 		 * our priority without moving us from one queue to another
527 		 * (since the running process is not on a queue.)
528 		 * If that happened after we setrq ourselves but before we
529 		 * swtch()'ed, we might not be on the queue indicated by
530 		 * our priority.
531 		 */
532 		(void) splclock();
533 		setrq(p);
534 		u.u_ru.ru_nivcsw++;
535 		swtch();
536 		if (code != SYS_sigreturn && (i = CURSIG(p)))
537 			psig(i);
538 	}
539 	if (u.u_prof.pr_scale) {
540 		int ticks;
541 		struct timeval *tv = &u.u_ru.ru_stime;
542 
543 		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
544 			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
545 		if (ticks) {
546 #ifdef PROFTIMER
547 			extern int profscale;
548 			addupc(frame.f_pc, &u.u_prof, ticks * profscale);
549 #else
550 			addupc(frame.f_pc, &u.u_prof, ticks);
551 #endif
552 		}
553 	}
554 	curpri = p->p_pri;
555 #ifdef KTRACE
556 	if (KTRPOINT(p, KTR_SYSRET))
557 		ktrsysret(p->p_tracep, code, error, rval[0]);
558 #endif
559 }
560