/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: trap.c 1.32 91/04/06$
 *
 *	@(#)trap.c	7.17 (Berkeley) 11/20/91
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "acct.h"
#include "kernel.h"
#include "signalvar.h"
#include "resourcevar.h"
#include "syslog.h"
#include "user.h"
#ifdef KTRACE
#include "ktrace.h"
#endif

#include "../include/psl.h"
#include "../include/trap.h"
#include "../include/cpu.h"
#include "../include/reg.h"
#include "../include/mtpr.h"

#include "vm/vm.h"
#include "vm/pmap.h"

#ifdef HPUXCOMPAT
#include "../hpux/hpux.h"
#endif

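/*
 * XXX note: sysent[] is the machine-independent system call dispatch
 * table and nsysent the number of entries; both are defined elsewhere
 * (init_sysent.c), so only declarations appear here.
 */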
struct	sysent	sysent[];
int	nsysent;
char	*trap_type[] = {
	"Bus error",
	"Address error",
	"Illegal instruction",
	"Zero divide",
	"CHK instruction",
	"TRAPV instruction",
	"Privilege violation",
	"Trace trap",
	"MMU fault",
	"SSIR trap",
	"Format error",
	"68881 exception",
	"Coprocessor violation",
	"Async system trap"
};
#define	TRAP_TYPES	(sizeof trap_type / sizeof trap_type[0])

/*
 * Size of various exception stack frames (minus the standard 8 bytes)
 */
short	exframesize[] = {
	FMT0SIZE,	/* type 0 - normal (68020/030/040) */
	FMT1SIZE,	/* type 1 - throwaway (68020/030/040) */
	FMT2SIZE,	/* type 2 - normal 6-word (68020/030/040) */
	-1,		/* type 3 - FP post-instruction (68040) */
	-1, -1, -1,	/* type 4-6 - undefined */
	-1,		/* type 7 - access error (68040) */
	58,		/* type 8 - bus fault (68010) */
	FMT9SIZE,	/* type 9 - coprocessor mid-instruction (68020/030) */
	FMTASIZE,	/* type A - short bus fault (68020/030) */
	FMTBSIZE,	/* type B - long bus fault (68020/030) */
	-1, -1, -1, -1	/* type C-F - undefined */
};
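
/*
 * XXX note: exframesize[] is indexed by the 4-bit frame format code.
 * The copyfault path in trap() stores the selected entry in f_stackadj
 * so the exception-return code can strip the extra frame bytes; the -1
 * entries mark formats we do not expect to unwind here.
 */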

#ifdef DEBUG
int mmudebug = 0;
#endif

/*
 * Trap is called from locore to handle most types of processor traps,
 * including events such as simulated software interrupts/AST's.
 * System calls are broken out for efficiency.
 */
/*ARGSUSED*/
trap(type, code, v, frame)
	int type;
	unsigned code;
	register unsigned v;
	struct frame frame;
{
	register int i;
	unsigned ucode = 0;
	register struct proc *p = curproc;
	struct timeval syst;
	unsigned ncode;
	int s;

	cnt.v_trap++;
	syst = p->p_stime;
	if (USERMODE(frame.f_sr)) {
		type |= T_USER;
		p->p_regs = frame.f_regs;
	}
	switch (type) {

	default:
dopanic:
		printf("trap type %d, code = %x, v = %x\n", type, code, v);
		regdump(frame.f_regs, 128);
		type &= ~T_USER;
		if ((unsigned)type < TRAP_TYPES)
			panic(trap_type[type]);
		panic("trap");

	case T_BUSERR:		/* kernel bus error */
		if (!p->p_addr->u_pcb.pcb_onfault)
			goto dopanic;
		/*
		 * If we have arranged to catch this fault in any of the
		 * copy to/from user space routines, set PC to return to
		 * indicated location and set flag informing buserror code
		 * that it may need to clean up stack frame.
		 */
copyfault:
		frame.f_stackadj = exframesize[frame.f_format];
		frame.f_format = frame.f_vector = 0;
		frame.f_pc = (int) p->p_addr->u_pcb.pcb_onfault;
		return;

	case T_BUSERR|T_USER:	/* bus error */
	case T_ADDRERR|T_USER:	/* address error */
		i = SIGBUS;
		break;

#ifdef FPCOPROC
	case T_COPERR:		/* kernel coprocessor violation */
#endif
	case T_FMTERR:		/* kernel format error */
	/*
	 * The user has most likely trashed the RTE or FP state info
	 * in the stack frame of a signal handler.
	 */
		type |= T_USER;
		printf("pid %d: kernel %s exception\n", p->p_pid,
		       type==T_COPERR ? "coprocessor" : "format");
		p->p_sigacts->ps_sigact[SIGILL] = SIG_DFL;
		i = sigmask(SIGILL);
		p->p_sigignore &= ~i;
		p->p_sigcatch &= ~i;
		p->p_sigmask &= ~i;
		i = SIGILL;
		ucode = frame.f_format;	/* XXX was ILL_RESAD_FAULT */
		break;

#ifdef FPCOPROC
	case T_COPERR|T_USER:	/* user coprocessor violation */
	/* What is a proper response here? */
		ucode = 0;
		i = SIGFPE;
		break;

	case T_FPERR|T_USER:	/* 68881 exceptions */
	/*
	 * We pass along the 68881 status register which locore stashed
	 * in code for us.  Note that there is a possibility that the
	 * bit pattern of this register will conflict with one of the
	 * FPE_* codes defined in signal.h.  Fortunately for us, the
	 * only such codes we use are all in the range 1-7 and the low
	 * 3 bits of the status register are defined as 0 so there is
	 * no clash.
	 */
		ucode = code;
		i = SIGFPE;
		break;
#endif

	case T_ILLINST|T_USER:	/* illegal instruction fault */
#ifdef HPUXCOMPAT
		if (p->p_flag & SHPUX) {
			ucode = HPUX_ILL_ILLINST_TRAP;
			i = SIGILL;
			break;
		}
		/* fall through */
#endif
	case T_PRIVINST|T_USER:	/* privileged instruction fault */
#ifdef HPUXCOMPAT
		if (p->p_flag & SHPUX)
			ucode = HPUX_ILL_PRIV_TRAP;
		else
#endif
		ucode = frame.f_format;	/* XXX was ILL_PRIVIN_FAULT */
		i = SIGILL;
		break;

	case T_ZERODIV|T_USER:	/* Divide by zero */
#ifdef HPUXCOMPAT
		if (p->p_flag & SHPUX)
			ucode = HPUX_FPE_INTDIV_TRAP;
		else
#endif
		ucode = frame.f_format;	/* XXX was FPE_INTDIV_TRAP */
		i = SIGFPE;
		break;

	case T_CHKINST|T_USER:	/* CHK instruction trap */
#ifdef HPUXCOMPAT
		if (p->p_flag & SHPUX) {
			/* handled differently under hp-ux */
			i = SIGILL;
			ucode = HPUX_ILL_CHK_TRAP;
			break;
		}
#endif
		ucode = frame.f_format;	/* XXX was FPE_SUBRNG_TRAP */
		i = SIGFPE;
		break;

	case T_TRAPVINST|T_USER:	/* TRAPV instruction trap */
#ifdef HPUXCOMPAT
		if (p->p_flag & SHPUX) {
			/* handled differently under hp-ux */
			i = SIGILL;
			ucode = HPUX_ILL_TRAPV_TRAP;
			break;
		}
#endif
		ucode = frame.f_format;	/* XXX was FPE_INTOVF_TRAP */
		i = SIGFPE;
		break;

	/*
	 * XXX: Trace traps are a nightmare.
	 *
	 *	HP-UX uses trap #1 for breakpoints,
	 *	HPBSD uses trap #2,
	 *	SUN 3.x uses trap #15,
	 *	KGDB uses trap #15 (for kernel breakpoints; handled elsewhere).
	 *
	 * HPBSD and HP-UX traps both get mapped by locore.s into T_TRACE.
	 * SUN 3.x traps get passed through as T_TRAP15 and are not really
	 * supported yet.
	 */
	case T_TRACE:		/* kernel trace trap */
	case T_TRAP15:		/* SUN trace trap */
		frame.f_sr &= ~PSL_T;
		i = SIGTRAP;
		break;

	case T_TRACE|T_USER:	/* user trace trap */
	case T_TRAP15|T_USER:	/* SUN user trace trap */
		frame.f_sr &= ~PSL_T;
		i = SIGTRAP;
		break;

	case T_ASTFLT:		/* system async trap, cannot happen */
		goto dopanic;

	case T_ASTFLT|T_USER:	/* user async trap */
		astpending = 0;
		/*
		 * We check for software interrupts first.  This is because
		 * they are at a higher level than ASTs, and on a VAX would
		 * interrupt the AST.  We assume that if we are processing
		 * an AST that we must be at IPL0 so we don't bother to
		 * check.  Note that we ensure that we are at least at SIR
		 * IPL while processing the SIR.
		 */
		spl1();
		/* fall into... */

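	/*
	 * XXX note: ssir is a bitmask of pending software interrupt
	 * requests, presumably posted at interrupt level by the network
	 * and clock code; each request is cleared with siroff() before
	 * its soft handler is run below.
	 */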
	case T_SSIR:		/* software interrupt */
	case T_SSIR|T_USER:
		if (ssir & SIR_NET) {
			siroff(SIR_NET);
			cnt.v_soft++;
			netintr();
		}
		if (ssir & SIR_CLOCK) {
			siroff(SIR_CLOCK);
			cnt.v_soft++;
			softclock((caddr_t)frame.f_pc, (int)frame.f_sr);
		}
		/*
		 * If this was not an AST trap, we are all done.
		 */
		if (type != (T_ASTFLT|T_USER)) {
			cnt.v_trap--;
			return;
		}
		spl0();
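		/*
		 * XXX note: SOWEUPC presumably means the clock code owed the
		 * process a profiling tick it could not charge at interrupt
		 * level; charge it here via addupc().
		 */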
#ifndef PROFTIMER
		if ((p->p_flag&SOWEUPC) && p->p_stats->p_prof.pr_scale) {
			addupc(frame.f_pc, &p->p_stats->p_prof, 1);
			p->p_flag &= ~SOWEUPC;
		}
#endif
		goto out;

	case T_MMUFLT:		/* kernel mode page fault */
		/* fall into ... */

	case T_MMUFLT|T_USER:	/* page fault */
	    {
		register vm_offset_t va;
		register struct vmspace *vm = p->p_vmspace;
		register vm_map_t map;
		int rv;
		vm_prot_t ftype;
		extern vm_map_t kernel_map;

		/*
		 * It is only a kernel address space fault iff:
		 * 	1. (type & T_USER) == 0  and
		 * 	2. pcb_onfault not set or
		 *	3. pcb_onfault set but supervisor space data fault
		 * The last can occur during an exec() copyin where the
		 * argument space is lazy-allocated.
		 */
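		/*
		 * XXX note: `code' holds the MMU status and special status
		 * word as packed by locore (cf. the "[mmu,,ssw]" printf
		 * below).  SSW_DF together with FC_SUPERD indicates a data
		 * fault on a supervisor-space reference, which should not be
		 * a user access made by copyin/copyout.
		 */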
		if (type == T_MMUFLT &&
		    (!p->p_addr->u_pcb.pcb_onfault ||
		     (code & (SSW_DF|FC_SUPERD)) == (SSW_DF|FC_SUPERD)))
			map = kernel_map;
		else
			map = &vm->vm_map;
		if ((code & (SSW_DF|SSW_RW)) == SSW_DF)	/* what about RMW? */
			ftype = VM_PROT_READ | VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
		va = trunc_page((vm_offset_t)v);
#ifdef DEBUG
		if (map == kernel_map && va == 0) {
			printf("trap: bad kernel access at %x\n", v);
			goto dopanic;
		}
#endif
		rv = vm_fault(map, va, ftype, FALSE);
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr && map != kernel_map) {
			if (rv == KERN_SUCCESS) {
				unsigned nss;

				nss = clrnd(btoc(USRSTACK-(unsigned)va));
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (rv == KERN_PROTECTION_FAILURE)
				rv = KERN_INVALID_ADDRESS;
		}
		if (rv == KERN_SUCCESS) {
			if (type == T_MMUFLT)
				return;
			goto out;
		}
		if (type == T_MMUFLT) {
			if (p->p_addr->u_pcb.pcb_onfault)
				goto copyfault;
			printf("vm_fault(%x, %x, %x, 0) -> %x\n",
			       map, va, ftype, rv);
			printf("  type %x, code [mmu,,ssw]: %x\n",
			       type, code);
			goto dopanic;
		}
		ucode = v;
		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
		break;
	    }
	}
	trapsignal(p, i, ucode);
	if ((type & T_USER) == 0)
		return;
out:
	while (i = CURSIG(p))
		psig(i);
	p->p_pri = p->p_usrpri;
	if (want_resched) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while (i = CURSIG(p))
			psig(i);
	}
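	/*
	 * XXX note: the expression below converts the system time consumed
	 * since entry (saved in syst) into clock ticks: elapsed milliseconds
	 * divided by the tick length in milliseconds (tick is in usec).
	 */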
	if (p->p_stats->p_prof.pr_scale) {
		int ticks;
		struct timeval *tv = &p->p_stime;

		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
		if (ticks) {
#ifdef PROFTIMER
			extern int profscale;
			addupc(frame.f_pc, &p->p_stats->p_prof,
			    ticks * profscale);
#else
			addupc(frame.f_pc, &p->p_stats->p_prof, ticks);
#endif
		}
	}
	curpri = p->p_pri;
}

/*
 * Process a system call.
 */
syscall(code, frame)
	volatile int code;
	struct frame frame;
{
	register caddr_t params;
	register int i;
	register struct sysent *callp;
	register struct proc *p = curproc;
	int error, opc, numsys, s;
	struct args {
		int i[8];
	} args;
	int rval[2];
	struct timeval syst;
	struct sysent *systab;
#ifdef HPUXCOMPAT
	extern struct sysent hpuxsysent[];
	extern int hpuxnsysent, notimp();
#endif

	cnt.v_syscall++;
	syst = p->p_stime;
	if (!USERMODE(frame.f_sr))
		panic("syscall");
	p->p_regs = frame.f_regs;
	opc = frame.f_pc - 2;
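	/*
	 * XXX note: f_pc points just past the 2-byte trap instruction that
	 * entered the kernel; opc is kept so the PC can be backed up to
	 * re-execute the call if it returns ERESTART (see below).
	 */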
	systab = sysent;
	numsys = nsysent;
#ifdef HPUXCOMPAT
	if (p->p_flag & SHPUX) {
		systab = hpuxsysent;
		numsys = hpuxnsysent;
	}
#endif
	params = (caddr_t)frame.f_regs[SP] + sizeof(int);
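	/*
	 * XXX note: the arguments sit on the user stack just above the
	 * return address pushed by the call to the syscall stub, hence the
	 * sizeof(int) offset from the saved user SP.
	 */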
	if (code == 0) {			/* indir */
		code = fuword(params);
		params += sizeof(int);
	}
	if (code >= numsys)
		callp = &systab[0];		/* indir (illegal) */
	else
		callp = &systab[code];
	if ((i = callp->sy_narg * sizeof (int)) &&
	    (error = copyin(params, (caddr_t)&args, (u_int)i))) {
#ifdef HPUXCOMPAT
		if (p->p_flag & SHPUX)
			error = bsdtohpuxerrno(error);
#endif
		frame.f_regs[D0] = error;
		frame.f_sr |= PSL_C;	/* carry bit */
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
		goto done;
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL))
		ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
#endif
	rval[0] = 0;
	rval[1] = frame.f_regs[D1];
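	/*
	 * XXX note: rval[1] is preloaded with the current D1 so that calls
	 * returning only a single value leave D1 unchanged when the results
	 * are copied back into the frame below.
	 */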
#ifdef HPUXCOMPAT
	/* debug kludge */
	if (callp->sy_call == notimp)
		error = notimp(p, args.i, rval, code, callp->sy_narg);
	else
#endif
	error = (*callp->sy_call)(p, &args, rval);
	if (error == ERESTART)
		frame.f_pc = opc;
	else if (error != EJUSTRETURN) {
		if (error) {
#ifdef HPUXCOMPAT
			if (p->p_flag & SHPUX)
				error = bsdtohpuxerrno(error);
#endif
			frame.f_regs[D0] = error;
			frame.f_sr |= PSL_C;	/* carry bit */
		} else {
			frame.f_regs[D0] = rval[0];
			frame.f_regs[D1] = rval[1];
			frame.f_sr &= ~PSL_C;
		}
	}
	/* else if (error == EJUSTRETURN) */
		/* nothing to do */
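	/*
	 * XXX note: EJUSTRETURN is presumably returned by calls such as
	 * sigreturn that have already set up the register frame themselves,
	 * so neither D0/D1 nor the carry bit is touched.
	 */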

done:
	/*
	 * Reinitialize proc pointer `p' as it may be different
	 * if this is a child returning from fork syscall.
	 */
	p = curproc;
	while (i = CURSIG(p))
		psig(i);
	p->p_pri = p->p_usrpri;
	if (want_resched) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrq ourselves but before we
		 * swtch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		splx(s);
		while (i = CURSIG(p))
			psig(i);
	}
	if (p->p_stats->p_prof.pr_scale) {
		int ticks;
		struct timeval *tv = &p->p_stime;

		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
		if (ticks) {
#ifdef PROFTIMER
			extern int profscale;
			addupc(frame.f_pc, &p->p_stats->p_prof,
			    ticks * profscale);
#else
			addupc(frame.f_pc, &p->p_stats->p_prof, ticks);
#endif
		}
	}
	curpri = p->p_pri;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p->p_tracep, code, error, rval[0]);
#endif
}