/*	$NetBSD: exception.c,v 1.74 2021/09/15 11:03:24 rin Exp $	*/

/*-
 * Copyright (c) 2002, 2019 The NetBSD Foundation, Inc. All rights reserved.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)trap.c	7.4 (Berkeley) 5/13/91
 */

/*-
 * Copyright (c) 1995 Charles M. Hannum.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)trap.c	7.4 (Berkeley) 5/13/91
 */

/*
 * SH3 Trap and System call handling
 *
 * T.Horiuchi 1998.06.8
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exception.c,v 1.74 2021/09/15 11:03:24 rin Exp $");

#include "opt_ddb.h"
#include "opt_kgdb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/intr.h>

#ifdef DDB
#include <sh3/db_machdep.h>
#endif
#ifdef KGDB
#include <sys/kgdb.h>
#endif

#include <uvm/uvm_extern.h>

#include <sh3/cpu.h>
#include <sh3/mmu.h>
#include <sh3/pcb.h>
#include <sh3/exception.h>
#include <sh3/userret.h>
const char * const exp_type[] = {
	"--",					/* 0x000 (reset vector) */
	"--",					/* 0x020 (reset vector) */
	"TLB miss/invalid (load)",		/* 0x040 EXPEVT_TLB_MISS_LD */
	"TLB miss/invalid (store)",		/* 0x060 EXPEVT_TLB_MISS_ST */
	"initial page write",			/* 0x080 EXPEVT_TLB_MOD */
	"TLB protection violation (load)",	/* 0x0a0 EXPEVT_TLB_PROT_LD */
	"TLB protection violation (store)",	/* 0x0c0 EXPEVT_TLB_PROT_ST */
	"address error (load)",			/* 0x0e0 EXPEVT_ADDR_ERR_LD */
	"address error (store)",		/* 0x100 EXPEVT_ADDR_ERR_ST */
	"FPU",					/* 0x120 EXPEVT_FPU */
	"--",					/* 0x140 (reset vector) */
	"unconditional trap (TRAPA)",		/* 0x160 EXPEVT_TRAPA */
	"reserved instruction code exception",	/* 0x180 EXPEVT_RES_INST */
	"illegal slot instruction exception",	/* 0x1a0 EXPEVT_SLOT_INST */
	"--",					/* 0x1c0 (external interrupt) */
	"user break point trap",		/* 0x1e0 EXPEVT_BREAK */
};
const int exp_types = __arraycount(exp_type);

void general_exception(struct lwp *, struct trapframe *, uint32_t);
void tlb_exception(struct lwp *, struct trapframe *, uint32_t);
void ast(struct lwp *, struct trapframe *);

/*
 * void general_exception(struct lwp *l, struct trapframe *tf, uint32_t va):
 *	l  ... curlwp when the exception occurred.
 *	tf ... full user context.
 *	va ... fault address for user-mode EXPEVT_ADDR_ERR_{LD,ST}.
 */
void
general_exception(struct lwp *l, struct trapframe *tf, uint32_t va)
{
	int expevt = tf->tf_expevt;
	bool usermode = !KERNELMODE(tf->tf_ssr);
	struct pcb *pcb;
	ksiginfo_t ksi;
	uint32_t trapcode;
#ifdef DDB
	uint32_t code;
#endif

	curcpu()->ci_data.cpu_ntrap++;

	/*
	 * Read trap code from TRA before enabling interrupts,
	 * otherwise it can be clobbered by a ddb breakpoint in an
	 * interrupt handler.
	 */
	trapcode = _reg_read_4(SH_(TRA)) >> 2;

	splx(tf->tf_ssr & PSL_IMASK);

	if (l == NULL)
		goto do_panic;

	if (usermode) {
		KDASSERT(l->l_md.md_regs == tf); /* check exception depth */
		expevt |= EXP_USER;
		LWP_CACHE_CREDS(l, l->l_proc);
	}

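	/*
	 * In the switch below, user-mode exceptions are distinguished from
	 * kernel-mode ones by the EXP_USER bit OR'ed into expevt above.
	 */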
	switch (expevt) {
	case EXPEVT_TRAPA | EXP_USER:
		/* Check for debugger break */
		if (trapcode == _SH_TRA_BREAK) {
			tf->tf_spc -= 2; /* back to the breakpoint address */
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGTRAP;
			ksi.ksi_code = TRAP_BRKPT;
			ksi.ksi_addr = (void *)tf->tf_spc;
			goto trapsignal;
		} else {
			/* XXX: we shouldn't treat *any* TRAPA as a syscall */
			(*l->l_proc->p_md.md_syscall)(l, tf);
			return;
		}
		break;

	case EXPEVT_BREAK | EXP_USER:
		l->l_md.md_flags &= ~MDL_SSTEP;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_code = TRAP_TRACE;
		ksi.ksi_addr = (void *)tf->tf_spc;
		goto trapsignal;

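	/*
	 * A kernel-mode address error is only recoverable if a copyin/copyout
	 * style fault handler is registered in pcb_onfault; resume at that
	 * handler with EFAULT in r0, otherwise panic.
	 */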
	case EXPEVT_ADDR_ERR_LD: /* FALLTHROUGH */
	case EXPEVT_ADDR_ERR_ST:
		pcb = lwp_getpcb(l);
		if (__predict_false(pcb->pcb_onfault == NULL))
			goto do_panic;
		tf->tf_spc = (int)pcb->pcb_onfault;
		tf->tf_r0 = EFAULT;
		break;

	case EXPEVT_ADDR_ERR_LD | EXP_USER: /* FALLTHROUGH */
	case EXPEVT_ADDR_ERR_ST | EXP_USER:
		KSI_INIT_TRAP(&ksi);
		if (((int)va) < 0) {
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
		} else {
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRALN;
		}
		ksi.ksi_addr = (void *)va;
		goto trapsignal;

	case EXPEVT_RES_INST | EXP_USER: /* FALLTHROUGH */
	case EXPEVT_SLOT_INST | EXP_USER:
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = ILL_ILLOPC; /* XXX: could be ILL_PRVOPC */
		ksi.ksi_addr = (void *)tf->tf_spc;
		goto trapsignal;

	default:
		goto do_panic;
	}

	if (usermode)
		userret(l);
	return;

 trapsignal:
	KASSERT(usermode);
	ksi.ksi_trap = tf->tf_expevt;
	trapsignal(l, &ksi);
	userret(l);
	return;

 do_panic:
#ifdef DDB
	switch (expevt & ~EXP_USER) {
	case EXPEVT_TRAPA:
		code = trapcode;
		break;
	default:
		code = 0;
		break;
	}
	if (kdb_trap(expevt, code, tf))
		return;
#endif
#ifdef KGDB
	if (kgdb_trap(EXPEVT_BREAK, tf))
		return;
#endif
	if (expevt >> 5 < exp_types)
		printf("fatal %s", exp_type[expevt >> 5]);
	else
		printf("EXPEVT 0x%03x", expevt);
	printf(" in %s mode\n", usermode ? "user" : "kernel");
	printf(" spc %x ssr %x\n", tf->tf_spc, tf->tf_ssr);

	panic("general_exception");
	/* NOTREACHED */
}


/*
 * void tlb_exception(struct lwp *l, struct trapframe *tf, uint32_t va):
 *	l  ... curlwp when the exception occurred.
 *	tf ... full user context.
 *	va ... fault address.
 */
void
tlb_exception(struct lwp *l, struct trapframe *tf, uint32_t va)
{
	struct vm_map *map;
	struct pcb *pcb;
	pmap_t pmap;
	void *onfault;
	ksiginfo_t ksi;
	bool usermode;
	int err, track, ftype;
	const char *panic_msg;

	pcb = lwp_getpcb(l);
	onfault = pcb->pcb_onfault;

#define TLB_ASSERT(assert, msg)				\
		do {					\
			if (!(assert)) {		\
				panic_msg = msg;	\
				goto tlb_panic;		\
			}				\
		} while (/*CONSTCOND*/0)

	usermode = !KERNELMODE(tf->tf_ssr);
	if (usermode) {
		KDASSERT(l->l_md.md_regs == tf);
	} else {
#if 0 /* FIXME: probably wrong for yamt-idlelwp */
		KDASSERT(l == NULL ||		/* idle */
		    l == &lwp0 ||		/* kthread */
		    l->l_md.md_regs != tf);	/* other */
#endif
	}

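	/*
	 * Decide how to handle this EXPEVT: "track" holds the PV bits
	 * (PVH_REFERENCED/PVH_MODIFIED) to record when the PTE is loaded
	 * into the TLB, and "ftype" is the access type for uvm_fault().
	 */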
	switch (tf->tf_expevt) {
	case EXPEVT_TLB_MISS_LD:
		track = PVH_REFERENCED;
		ftype = VM_PROT_READ;
		break;
	case EXPEVT_TLB_MISS_ST:
		track = PVH_REFERENCED;
		ftype = VM_PROT_WRITE;
		break;
	case EXPEVT_TLB_MOD:
		track = PVH_REFERENCED | PVH_MODIFIED;
		ftype = VM_PROT_WRITE;
		break;
	case EXPEVT_TLB_PROT_LD:
		TLB_ASSERT((int)va > 0,
		    "kernel virtual protection fault (load)");
		if (usermode) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			ksi.ksi_addr = (void *)va;
			splx(tf->tf_ssr & PSL_IMASK);
			LWP_CACHE_CREDS(l, l->l_proc);
			goto user_fault;
		} else {
			TLB_ASSERT(l && onfault != NULL,
			    "no copyin/out fault handler (load protection)");
			tf->tf_spc = (int)onfault;
			tf->tf_r0 = EFAULT;
		}
		return;

	case EXPEVT_TLB_PROT_ST:
		track = 0;	/* call uvm_fault first. (COW) */
		ftype = VM_PROT_WRITE;
		break;

	default:
		TLB_ASSERT(0, "impossible expevt");
	}

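	/*
	 * The kernel segments occupy the upper half of the SH-3 address
	 * space, so a negative (signed) va is a kernel address; user
	 * addresses touched from kernel mode require an onfault handler.
	 */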
	/* Select address space */
	if (usermode) {
		TLB_ASSERT(l != NULL, "no curlwp");
		map = &l->l_proc->p_vmspace->vm_map;
		pmap = map->pmap;
	} else {
		if ((int)va < 0) {
			map = kernel_map;
			pmap = pmap_kernel();
		} else {
			TLB_ASSERT(l != NULL && onfault != NULL,
			    "invalid user-space access from kernel mode");
			if (va == 0) {
				tf->tf_spc = (int)onfault;
				tf->tf_r0 = EFAULT;
				return;
			}
			map = &l->l_proc->p_vmspace->vm_map;
			pmap = map->pmap;
		}
	}

	/* Look up the page table; if a valid entry is found, load it. */
	if (track && __pmap_pte_load(pmap, va, track)) {
		return;
	}

	/* Page not found; call the fault handler. */
	splx(tf->tf_ssr & PSL_IMASK);
	if (usermode)
		LWP_CACHE_CREDS(l, l->l_proc);
	pcb->pcb_onfault = NULL;
	err = uvm_fault(map, va, ftype);
	pcb->pcb_onfault = onfault;

	/* User stack extension */
	if (map != kernel_map &&
	    (va >= (vaddr_t)l->l_proc->p_vmspace->vm_maxsaddr) &&
	    (va <  (vaddr_t)l->l_proc->p_vmspace->vm_minsaddr)) {
		if (err == 0) {
			struct vmspace *vm = l->l_proc->p_vmspace;
			uint32_t nss;
			nss = btoc((vaddr_t)vm->vm_minsaddr - va);
			if (nss > vm->vm_ssize)
				vm->vm_ssize = nss;
		} else if (err == EACCES) {
			err = EFAULT;
		}
	}

	/* Page is resident now; load its PTE into the TLB. */
	if (err == 0) {
		bool loaded;
		if (usermode)
			userret(l);
		loaded = __pmap_pte_load(pmap, va, track);
#if 0
		/*
		 * XXXAD I don't think you should do this - consider
		 * a multithreaded program where another thread got
		 * switched to during UVM fault and it unmapped the
		 * page. I think you should just let the fault happen
		 * again.
		 */
		TLB_ASSERT(loaded, "page table entry not found");
#else
		__USE(loaded);
#endif
		return;
	}

	/* uvm_fault() failed. */
	if (usermode) {
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_addr = (void *)va;

		switch (err) {
		case ENOMEM:
			ksi.ksi_signo = SIGKILL;
			break;
		case EINVAL:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRERR;
			break;
		case EACCES:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			break;
		default:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_MAPERR;
			break;
		}
		goto user_fault;
	} else {
		TLB_ASSERT(onfault,
		    "no copyin/out fault handler (page not found)");
		tf->tf_spc = (int)onfault;
		tf->tf_r0 = err;
	}
	return;

 user_fault:
	ksi.ksi_trap = tf->tf_expevt;
	trapsignal(l, &ksi);
	userret(l);
	return;

 tlb_panic:
	panic("tlb_exception: %s\n"
	      "expevt=%x va=%08x ssr=%08x spc=%08x lwp=%p onfault=%p",
	      panic_msg, tf->tf_expevt, va, tf->tf_ssr, tf->tf_spc,
	      l, pcb->pcb_onfault);
#undef	TLB_ASSERT
}


/*
 * void ast(struct lwp *l, struct trapframe *tf):
 *	l  ... curlwp when the exception occurred.
 *	tf ... full user context.
 *	Called on exception return.  If returning from kernel to user mode,
 *	handle pending asynchronous software traps and context switch if
 *	needed.  Interrupts are blocked on entry.
 */
void
ast(struct lwp *l, struct trapframe *tf)
{
	int s;

	if (__predict_true(l->l_md.md_astpending == 0)) {
		return;
	}
	if (__predict_false(KERNELMODE(tf->tf_ssr))) {
		/* should not occur but leave it here to be safe */
		return;
	}

	KDASSERT(l != NULL);
	KDASSERT(l->l_md.md_regs == tf);

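	/*
	 * Drop to the interrupt level the trapframe will return at, handle
	 * the AST in userret(), then recheck at splhigh() in case another
	 * AST was posted in the meantime.
	 */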
	s = tf->tf_ssr & PSL_IMASK;
	do {
		splx(s);
		/* userret() clears l_md.md_astpending */
		userret(l);
		s = splhigh();
	} while (__predict_false(l->l_md.md_astpending));
}