xref: /original-bsd/sys/hp300/hp300/trap.c (revision 7f64fca7)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: trap.c 1.32 91/04/06$
13  *
14  *	@(#)trap.c	7.14.1.2 (Berkeley) 05/28/91
15  */
16 
17 #include "param.h"
18 #include "systm.h"
19 #include "proc.h"
20 #include "seg.h"
21 #include "acct.h"
22 #include "kernel.h"
23 #include "signalvar.h"
24 #include "resourcevar.h"
25 #include "syslog.h"
26 #include "user.h"
27 #ifdef KTRACE
28 #include "ktrace.h"
29 #endif
30 
31 #include "../include/psl.h"
32 #include "../include/trap.h"
33 #include "../include/cpu.h"
34 #include "../include/reg.h"
35 #include "../include/mtpr.h"
36 
37 #include "vm/vm.h"
38 #include "vm/pmap.h"
39 #include "vmmeter.h"
40 
41 #ifdef HPUXCOMPAT
42 #include "../hpux/hpux.h"
43 #endif
44 
/*
 * System call table and its size; defined elsewhere in the kernel
 * (auto-generated sysent table), consulted by syscall() below.
 */
struct	sysent	sysent[];
int	nsysent;

/* Printable names for trap types, indexed by the T_* values. */
char	*trap_type[] = {
	"Bus error",
	"Address error",
	"Illegal instruction",
	"Zero divide",
	"CHK instruction",
	"TRAPV instruction",
	"Privilege violation",
	"Trace trap",
	"MMU fault",
	"SSIR trap",
	"Format error",
	"68881 exception",
	"Coprocessor violation",
	"Async system trap"
};
#define	TRAP_TYPES	(sizeof trap_type / sizeof trap_type[0])

/*
 * Size of various exception stack frames (minus the standard 8 bytes),
 * indexed by the exception frame format number; -1 marks formats that
 * should not occur on the CPUs this code runs on.
 */
short	exframesize[] = {
	FMT0SIZE,	/* type 0 - normal (68020/030/040) */
	FMT1SIZE,	/* type 1 - throwaway (68020/030/040) */
	FMT2SIZE,	/* type 2 - normal 6-word (68020/030/040) */
	-1,		/* type 3 - FP post-instruction (68040) */
	-1, -1, -1,	/* type 4-6 - undefined */
	-1,		/* type 7 - access error (68040) */
	58,		/* type 8 - bus fault (68010) */
	FMT9SIZE,	/* type 9 - coprocessor mid-instruction (68020/030) */
	FMTASIZE,	/* type A - short bus fault (68020/030) */
	FMTBSIZE,	/* type B - long bus fault (68020/030) */
	-1, -1, -1, -1	/* type C-F - undefined */
};

#ifdef DEBUG
int mmudebug = 0;	/* patchable debug flag; not referenced in this chunk of the file */
#endif
86 
87 /*
88  * Trap is called from locore to handle most types of processor traps,
89  * including events such as simulated software interrupts/AST's.
90  * System calls are broken out for efficiency.
91  */
92 /*ARGSUSED*/
93 trap(type, code, v, frame)
94 	int type;
95 	unsigned code;
96 	register unsigned v;
97 	struct frame frame;
98 {
99 	register int i;
100 	unsigned ucode = 0;
101 	register struct proc *p = curproc;
102 	struct timeval syst;
103 	unsigned ncode;
104 
105 	cnt.v_trap++;
106 	syst = p->p_stime;
107 	if (USERMODE(frame.f_sr)) {
108 		type |= T_USER;
109 		p->p_regs = frame.f_regs;
110 	}
111 	switch (type) {
112 
113 	default:
114 dopanic:
115 		printf("trap type %d, code = %x, v = %x\n", type, code, v);
116 		regdump(frame.f_regs, 128);
117 		type &= ~T_USER;
118 		if ((unsigned)type < TRAP_TYPES)
119 			panic(trap_type[type]);
120 		panic("trap");
121 
122 	case T_BUSERR:		/* kernel bus error */
123 		if (!p->p_addr->u_pcb.pcb_onfault)
124 			goto dopanic;
125 		/*
126 		 * If we have arranged to catch this fault in any of the
127 		 * copy to/from user space routines, set PC to return to
128 		 * indicated location and set flag informing buserror code
129 		 * that it may need to clean up stack frame.
130 		 */
131 copyfault:
132 		frame.f_stackadj = exframesize[frame.f_format];
133 		frame.f_format = frame.f_vector = 0;
134 		frame.f_pc = (int) p->p_addr->u_pcb.pcb_onfault;
135 		return;
136 
137 	case T_BUSERR|T_USER:	/* bus error */
138 	case T_ADDRERR|T_USER:	/* address error */
139 		i = SIGBUS;
140 		break;
141 
142 #ifdef FPCOPROC
143 	case T_COPERR:		/* kernel coprocessor violation */
144 #endif
145 	case T_FMTERR:		/* kernel format error */
146 	/*
147 	 * The user has most likely trashed the RTE or FP state info
148 	 * in the stack frame of a signal handler.
149 	 */
150 		type |= T_USER;
151 		printf("pid %d: kernel %s exception\n", p->p_pid,
152 		       type==T_COPERR ? "coprocessor" : "format");
153 		p->p_sigacts->ps_sigact[SIGILL] = SIG_DFL;
154 		i = sigmask(SIGILL);
155 		p->p_sigignore &= ~i;
156 		p->p_sigcatch &= ~i;
157 		p->p_sigmask &= ~i;
158 		i = SIGILL;
159 		ucode = frame.f_format;	/* XXX was ILL_RESAD_FAULT */
160 		break;
161 
162 #ifdef FPCOPROC
163 	case T_COPERR|T_USER:	/* user coprocessor violation */
164 	/* What is a proper response here? */
165 		ucode = 0;
166 		i = SIGFPE;
167 		break;
168 
169 	case T_FPERR|T_USER:	/* 68881 exceptions */
170 	/*
171 	 * We pass along the 68881 status register which locore stashed
172 	 * in code for us.  Note that there is a possibility that the
173 	 * bit pattern of this register will conflict with one of the
174 	 * FPE_* codes defined in signal.h.  Fortunately for us, the
175 	 * only such codes we use are all in the range 1-7 and the low
176 	 * 3 bits of the status register are defined as 0 so there is
177 	 * no clash.
178 	 */
179 		ucode = code;
180 		i = SIGFPE;
181 		break;
182 #endif
183 
184 	case T_ILLINST|T_USER:	/* illegal instruction fault */
185 #ifdef HPUXCOMPAT
186 		if (p->p_flag & SHPUX) {
187 			ucode = HPUX_ILL_ILLINST_TRAP;
188 			i = SIGILL;
189 			break;
190 		}
191 		/* fall through */
192 #endif
193 	case T_PRIVINST|T_USER:	/* privileged instruction fault */
194 #ifdef HPUXCOMPAT
195 		if (p->p_flag & SHPUX)
196 			ucode = HPUX_ILL_PRIV_TRAP;
197 		else
198 #endif
199 		ucode = frame.f_format;	/* XXX was ILL_PRIVIN_FAULT */
200 		i = SIGILL;
201 		break;
202 
203 	case T_ZERODIV|T_USER:	/* Divide by zero */
204 #ifdef HPUXCOMPAT
205 		if (p->p_flag & SHPUX)
206 			ucode = HPUX_FPE_INTDIV_TRAP;
207 		else
208 #endif
209 		ucode = frame.f_format;	/* XXX was FPE_INTDIV_TRAP */
210 		i = SIGFPE;
211 		break;
212 
213 	case T_CHKINST|T_USER:	/* CHK instruction trap */
214 #ifdef HPUXCOMPAT
215 		if (p->p_flag & SHPUX) {
216 			/* handled differently under hp-ux */
217 			i = SIGILL;
218 			ucode = HPUX_ILL_CHK_TRAP;
219 			break;
220 		}
221 #endif
222 		ucode = frame.f_format;	/* XXX was FPE_SUBRNG_TRAP */
223 		i = SIGFPE;
224 		break;
225 
226 	case T_TRAPVINST|T_USER:	/* TRAPV instruction trap */
227 #ifdef HPUXCOMPAT
228 		if (p->p_flag & SHPUX) {
229 			/* handled differently under hp-ux */
230 			i = SIGILL;
231 			ucode = HPUX_ILL_TRAPV_TRAP;
232 			break;
233 		}
234 #endif
235 		ucode = frame.f_format;	/* XXX was FPE_INTOVF_TRAP */
236 		i = SIGFPE;
237 		break;
238 
239 	/*
240 	 * XXX: Trace traps are a nightmare.
241 	 *
242 	 *	HP-UX uses trap #1 for breakpoints,
243 	 *	HPBSD uses trap #2,
244 	 *	SUN 3.x uses trap #15,
245 	 *	KGDB uses trap #15 (for kernel breakpoints; handled elsewhere).
246 	 *
247 	 * HPBSD and HP-UX traps both get mapped by locore.s into T_TRACE.
248 	 * SUN 3.x traps get passed through as T_TRAP15 and are not really
249 	 * supported yet.
250 	 */
251 	case T_TRACE:		/* kernel trace trap */
252 	case T_TRAP15:		/* SUN trace trap */
253 		frame.f_sr &= ~PSL_T;
254 		i = SIGTRAP;
255 		break;
256 
257 	case T_TRACE|T_USER:	/* user trace trap */
258 	case T_TRAP15|T_USER:	/* SUN user trace trap */
259 		frame.f_sr &= ~PSL_T;
260 		i = SIGTRAP;
261 		break;
262 
263 	case T_ASTFLT:		/* system async trap, cannot happen */
264 		goto dopanic;
265 
266 	case T_ASTFLT|T_USER:	/* user async trap */
267 		astpending = 0;
268 		/*
269 		 * We check for software interrupts first.  This is because
270 		 * they are at a higher level than ASTs, and on a VAX would
271 		 * interrupt the AST.  We assume that if we are processing
272 		 * an AST that we must be at IPL0 so we don't bother to
273 		 * check.  Note that we ensure that we are at least at SIR
274 		 * IPL while processing the SIR.
275 		 */
276 		spl1();
277 		/* fall into... */
278 
279 	case T_SSIR:		/* software interrupt */
280 	case T_SSIR|T_USER:
281 		if (ssir & SIR_NET) {
282 			siroff(SIR_NET);
283 			cnt.v_soft++;
284 			netintr();
285 		}
286 		if (ssir & SIR_CLOCK) {
287 			siroff(SIR_CLOCK);
288 			cnt.v_soft++;
289 			softclock((caddr_t)frame.f_pc, (int)frame.f_sr);
290 		}
291 		/*
292 		 * If this was not an AST trap, we are all done.
293 		 */
294 		if (type != T_ASTFLT|T_USER) {
295 			cnt.v_trap--;
296 			return;
297 		}
298 		spl0();
299 #ifndef PROFTIMER
300 		if ((p->p_flag&SOWEUPC) && p->p_stats->p_prof.pr_scale) {
301 			addupc(frame.f_pc, &p->p_stats->p_prof, 1);
302 			p->p_flag &= ~SOWEUPC;
303 		}
304 #endif
305 		goto out;
306 
307 	case T_MMUFLT:		/* kernel mode page fault */
308 		/* fall into ... */
309 
310 	case T_MMUFLT|T_USER:	/* page fault */
311 	    {
312 		register vm_offset_t va;
313 		register struct vmspace *vm = p->p_vmspace;
314 		register vm_map_t map;
315 		int rv;
316 		vm_prot_t ftype;
317 		extern vm_map_t kernel_map;
318 
319 		/*
320 		 * It is only a kernel address space fault iff:
321 		 * 	1. (type & T_USER) == 0  and
322 		 * 	2. pcb_onfault not set or
323 		 *	3. pcb_onfault set but supervisor space data fault
324 		 * The last can occur during an exec() copyin where the
325 		 * argument space is lazy-allocated.
326 		 */
327 		if (type == T_MMUFLT &&
328 		    (!p->p_addr->u_pcb.pcb_onfault ||
329 		     (code & (SSW_DF|FC_SUPERD)) == (SSW_DF|FC_SUPERD)))
330 			map = kernel_map;
331 		else
332 			map = &vm->vm_map;
333 		if ((code & (SSW_DF|SSW_RW)) == SSW_DF)	/* what about RMW? */
334 			ftype = VM_PROT_READ | VM_PROT_WRITE;
335 		else
336 			ftype = VM_PROT_READ;
337 		va = trunc_page((vm_offset_t)v);
338 #ifdef DEBUG
339 		if (map == kernel_map && va == 0) {
340 			printf("trap: bad kernel access at %x\n", v);
341 			goto dopanic;
342 		}
343 #endif
344 		rv = vm_fault(map, va, ftype, FALSE);
345 		/*
346 		 * If this was a stack access we keep track of the maximum
347 		 * accessed stack size.  Also, if vm_fault gets a protection
348 		 * failure it is due to accessing the stack region outside
349 		 * the current limit and we need to reflect that as an access
350 		 * error.
351 		 */
352 		if ((caddr_t)va >= vm->vm_maxsaddr && map != kernel_map) {
353 			if (rv == KERN_SUCCESS) {
354 				unsigned nss;
355 
356 				nss = clrnd(btoc(USRSTACK-(unsigned)va));
357 				if (nss > vm->vm_ssize)
358 					vm->vm_ssize = nss;
359 			} else if (rv == KERN_PROTECTION_FAILURE)
360 				rv = KERN_INVALID_ADDRESS;
361 		}
362 		if (rv == KERN_SUCCESS) {
363 			if (type == T_MMUFLT)
364 				return;
365 			goto out;
366 		}
367 		if (type == T_MMUFLT) {
368 			if (p->p_addr->u_pcb.pcb_onfault)
369 				goto copyfault;
370 			printf("vm_fault(%x, %x, %x, 0) -> %x\n",
371 			       map, va, ftype, rv);
372 			printf("  type %x, code [mmu,,ssw]: %x\n",
373 			       type, code);
374 			goto dopanic;
375 		}
376 		ucode = v;
377 		i = (rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV;
378 		break;
379 	    }
380 	}
381 	trapsignal(p, i, ucode);
382 	if ((type & T_USER) == 0)
383 		return;
384 out:
385 	while (i = CURSIG(p))
386 		psig(i);
387 	p->p_pri = p->p_usrpri;
388 	if (want_resched) {
389 		/*
390 		 * Since we are curproc, clock will normally just change
391 		 * our priority without moving us from one queue to another
392 		 * (since the running process is not on a queue.)
393 		 * If that happened after we setrq ourselves but before we
394 		 * swtch()'ed, we might not be on the queue indicated by
395 		 * our priority.
396 		 */
397 		(void) splclock();
398 		setrq(p);
399 		p->p_stats->p_ru.ru_nivcsw++;
400 		swtch();
401 		while (i = CURSIG(p))
402 			psig(i);
403 	}
404 	if (p->p_stats->p_prof.pr_scale) {
405 		int ticks;
406 		struct timeval *tv = &p->p_stime;
407 
408 		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
409 			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
410 		if (ticks) {
411 #ifdef PROFTIMER
412 			extern int profscale;
413 			addupc(frame.f_pc, &p->p_stats->p_prof,
414 			    ticks * profscale);
415 #else
416 			addupc(frame.f_pc, &p->p_stats->p_prof, ticks);
417 #endif
418 		}
419 	}
420 	curpri = p->p_pri;
421 }
422 
423 /*
424  * Proces a system call.
425  */
426 syscall(code, frame)
427 	volatile int code;
428 	struct frame frame;
429 {
430 	register caddr_t params;
431 	register int i;
432 	register struct sysent *callp;
433 	register struct proc *p = curproc;
434 	int error, opc, numsys;
435 	struct args {
436 		int i[8];
437 	} args;
438 	int rval[2];
439 	struct timeval syst;
440 	struct sysent *systab;
441 #ifdef HPUXCOMPAT
442 	extern struct sysent hpuxsysent[];
443 	extern int hpuxnsysent, notimp();
444 #endif
445 
446 	cnt.v_syscall++;
447 	syst = p->p_stime;
448 	if (!USERMODE(frame.f_sr))
449 		panic("syscall");
450 	p->p_regs = frame.f_regs;
451 	opc = frame.f_pc - 2;
452 	systab = sysent;
453 	numsys = nsysent;
454 #ifdef HPUXCOMPAT
455 	if (p->p_flag & SHPUX) {
456 		systab = hpuxsysent;
457 		numsys = hpuxnsysent;
458 	}
459 #endif
460 	params = (caddr_t)frame.f_regs[SP] + sizeof(int);
461 	if (code == 0) {			/* indir */
462 		code = fuword(params);
463 		params += sizeof(int);
464 	}
465 	if (code >= numsys)
466 		callp = &systab[0];		/* indir (illegal) */
467 	else
468 		callp = &systab[code];
469 	if ((i = callp->sy_narg * sizeof (int)) &&
470 	    (error = copyin(params, (caddr_t)&args, (u_int)i))) {
471 #ifdef HPUXCOMPAT
472 		if (p->p_flag & SHPUX)
473 			error = bsdtohpuxerrno(error);
474 #endif
475 		frame.f_regs[D0] = error;
476 		frame.f_sr |= PSL_C;	/* carry bit */
477 #ifdef KTRACE
478 		if (KTRPOINT(p, KTR_SYSCALL))
479 			ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
480 #endif
481 		goto done;
482 	}
483 #ifdef KTRACE
484 	if (KTRPOINT(p, KTR_SYSCALL))
485 		ktrsyscall(p->p_tracep, code, callp->sy_narg, args.i);
486 #endif
487 	rval[0] = 0;
488 	rval[1] = frame.f_regs[D1];
489 #ifdef HPUXCOMPAT
490 	/* debug kludge */
491 	if (callp->sy_call == notimp)
492 		error = notimp(p, args.i, rval, code, callp->sy_narg);
493 	else
494 #endif
495 	error = (*callp->sy_call)(p, &args, rval);
496 #ifdef DIAGNOSTIC
497 	if (curproc->p_spare[0])
498 		panic("syscall: M_NAMEI");
499 	if (curproc->p_spare[1])
500 		panic("syscall: STARTSAVE");
501 	if (curproc->p_spare[2])
502 		panic("syscall: LOCK COUNT");
503 #endif
504 	if (error == ERESTART)
505 		frame.f_pc = opc;
506 	else if (error != EJUSTRETURN) {
507 		if (error) {
508 #ifdef HPUXCOMPAT
509 			if (p->p_flag & SHPUX)
510 				error = bsdtohpuxerrno(error);
511 #endif
512 			frame.f_regs[D0] = error;
513 			frame.f_sr |= PSL_C;	/* carry bit */
514 		} else {
515 			frame.f_regs[D0] = rval[0];
516 			frame.f_regs[D1] = rval[1];
517 			frame.f_sr &= ~PSL_C;
518 		}
519 	}
520 	/* else if (error == EJUSTRETURN) */
521 		/* nothing to do */
522 
523 done:
524 	/*
525 	 * Reinitialize proc pointer `p' as it may be different
526 	 * if this is a child returning from fork syscall.
527 	 */
528 	p = curproc;
529 	while (i = CURSIG(p))
530 		psig(i);
531 	p->p_pri = p->p_usrpri;
532 	if (want_resched) {
533 		/*
534 		 * Since we are curproc, clock will normally just change
535 		 * our priority without moving us from one queue to another
536 		 * (since the running process is not on a queue.)
537 		 * If that happened after we setrq ourselves but before we
538 		 * swtch()'ed, we might not be on the queue indicated by
539 		 * our priority.
540 		 */
541 		(void) splclock();
542 		setrq(p);
543 		p->p_stats->p_ru.ru_nivcsw++;
544 		swtch();
545 		while (i = CURSIG(p))
546 			psig(i);
547 	}
548 	if (p->p_stats->p_prof.pr_scale) {
549 		int ticks;
550 		struct timeval *tv = &p->p_stime;
551 
552 		ticks = ((tv->tv_sec - syst.tv_sec) * 1000 +
553 			(tv->tv_usec - syst.tv_usec) / 1000) / (tick / 1000);
554 		if (ticks) {
555 #ifdef PROFTIMER
556 			extern int profscale;
557 			addupc(frame.f_pc, &p->p_stats->p_prof,
558 			    ticks * profscale);
559 #else
560 			addupc(frame.f_pc, &p->p_stats->p_prof, ticks);
561 #endif
562 		}
563 	}
564 	curpri = p->p_pri;
565 #ifdef KTRACE
566 	if (KTRPOINT(p, KTR_SYSRET))
567 		ktrsysret(p->p_tracep, code, error, rval[0]);
568 #endif
569 }
570