1 /*	$NetBSD: trap.c,v 1.241 2016/07/11 18:54:32 skrll Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1992, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department and Ralph Campbell.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * from: Utah Hdr: trap.c 1.32 91/04/06
37  *
38  *	@(#)trap.c	8.5 (Berkeley) 1/11/94
39  */
40 
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.241 2016/07/11 18:54:32 skrll Exp $");
43 
44 #include "opt_cputype.h"	/* which mips CPU levels do we support? */
45 #include "opt_ddb.h"
46 #include "opt_kgdb.h"
47 #include "opt_multiprocessor.h"
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/kernel.h>
52 #include <sys/cpu.h>
53 #include <sys/proc.h>
54 #include <sys/ras.h>
55 #include <sys/signalvar.h>
56 #include <sys/syscall.h>
57 #include <sys/buf.h>
58 #include <sys/ktrace.h>
59 #include <sys/kauth.h>
60 #include <sys/atomic.h>
61 
62 #include <mips/cache.h>
63 #include <mips/locore.h>
64 #include <mips/mips_opcode.h>
65 
66 #include <uvm/uvm.h>
67 
68 #include <mips/trap.h>
69 #include <mips/reg.h>
70 #include <mips/regnum.h>			/* symbolic register indices */
71 #include <mips/pcb.h>
72 #include <mips/pte.h>
73 #include <mips/psl.h>
74 #include <mips/userret.h>
75 
76 #ifdef DDB
77 #include <machine/db_machdep.h>
78 #include <ddb/db_sym.h>
79 #endif
80 
81 #ifdef KGDB
82 #include <sys/kgdb.h>
83 #endif
84 
85 const char * const trap_names[] = {
86 	"external interrupt",
87 	"TLB modification",
88 	"TLB miss (load or instr. fetch)",
89 	"TLB miss (store)",
90 	"address error (load or I-fetch)",
91 	"address error (store)",
92 	"bus error (I-fetch)",
93 	"bus error (load or store)",
94 	"system call",
95 	"breakpoint",
96 	"reserved instruction",
97 	"coprocessor unusable",
98 	"arithmetic overflow",
99 	"r4k trap/r3k reserved 13",
100 	"r4k virtual coherency instruction/r3k reserved 14",
101 	"r4k floating point/r3k reserved 15",
102 	"mips NMI",
103 	"reserved 17",
104 	"mipsNN cp2 exception",
105 	"mipsNN TLBRI",
106 	"mipsNN TLBXI",
107 	"reserved 21",
108 	"mips64 MDMX",
109 	"r4k watch",
110 	"mipsNN machine check",
111 	"mipsNN thread",
112 	"DSP exception",
113 	"reserved 27",
114 	"reserved 28",
115 	"reserved 29",
116 	"mipsNN cache error",
117 	"r4000 virtual coherency data",
118 };
119 
120 void trap(uint32_t, uint32_t, vaddr_t, vaddr_t, struct trapframe *);
121 void ast(void);
122 
123 /*
124  * fork syscall returns directly to user process via lwp_trampoline(),
125  * which will be called the very first time the child starts running.
126  */
127 void
128 child_return(void *arg)
129 {
130 	struct lwp *l = arg;
131 	struct trapframe *utf = l->l_md.md_utf;
132 
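	/*
	 * Syscall return convention: v0 holds the return value and a3 the
	 * error flag (0 = success).  fork returns 0 in the child; v1 = 1
	 * identifies the child side of the fork to the libc stub.
	 */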
133 	utf->tf_regs[_R_V0] = 0;
134 	utf->tf_regs[_R_V1] = 1;
135 	utf->tf_regs[_R_A3] = 0;
136 	userret(l);
137 	ktrsysret(SYS_fork, 0, 0);
138 }
139 
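/*
 * TRAPTYPE() extracts the exception code field from a CP0 Cause register
 * value; KERNLAND_P() is true for addresses with the sign bit set, i.e.
 * kernel-space addresses on MIPS.
 */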
140 #ifdef MIPS3_PLUS
141 #define TRAPTYPE(x) (((x) & MIPS3_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT)
142 #else
143 #define TRAPTYPE(x) (((x) & MIPS1_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT)
144 #endif
145 #define KERNLAND_P(x) ((intptr_t)(x) < 0)
146 
147 /*
148  * Trap is called from locore to handle most types of processor traps.
149  * System calls are broken out for efficiency.  MIPS can handle software
150  * interrupts as a part of real interrupt processing.
151  */
152 void
153 trap(uint32_t status, uint32_t cause, vaddr_t vaddr, vaddr_t pc,
154     struct trapframe *tf)
155 {
156 	int type;
157 	struct lwp * const l = curlwp;
158 	struct proc * const p = curproc;
159 	struct trapframe * const utf = l->l_md.md_utf;
160 	struct pcb * const pcb = lwp_getpcb(l);
161 	vm_prot_t ftype;
162 	ksiginfo_t ksi;
163 	extern void fswintrberr(void);
164 	void *onfault;
165 	int rv;
166 
167 	KSI_INIT_TRAP(&ksi);
168 
169 	curcpu()->ci_data.cpu_ntrap++;
170 	if (CPUISMIPS3 && (status & MIPS3_SR_NMI)) {
171 		type = T_NMI;
172 	} else {
173 		type = TRAPTYPE(cause);
174 	}
175 	if (USERMODE(status)) {
176 		tf = utf;
177 		type |= T_USER;
178 		LWP_CACHE_CREDS(l, p);
179 	}
180 
181 	switch (type) {
182 	default:
183 	dopanic:
184 		(void)splhigh();
185 
186 		/*
187 		 * build the message with snprintf so it can be emitted by a single, readable printf
188 		 */
189 		char strbuf[256], *str = strbuf;
190 		int n, sz = sizeof(strbuf);
191 
192 		n = snprintf(str, sz, "pid %d(%s): ", p->p_pid, p->p_comm);
193 		sz -= n;
194 		str += n;
195 		n = snprintf(str, sz, "trap: cpu%d, %s in %s mode\n",
196 			cpu_number(), trap_names[TRAPTYPE(cause)],
197 			USERMODE(status) ? "user" : "kernel");
198 		sz -= n;
199 		str += n;
200 		n = snprintf(str, sz, "status=%#x, cause=%#x, epc=%#"
201 			PRIxVADDR ", vaddr=%#" PRIxVADDR "\n",
202 			status, cause, pc, vaddr);
203 		sz -= n;
204 		str += n;
205 		if (USERMODE(status)) {
206 			KASSERT(tf == utf);
207 			n = snprintf(str, sz, "frame=%p usp=%#" PRIxREGISTER
208 			    " ra=%#" PRIxREGISTER "\n",
209 			    tf, tf->tf_regs[_R_SP], tf->tf_regs[_R_RA]);
210 			sz -= n;
211 			str += n;
212 		} else {
213 			n = snprintf(str, sz, "tf=%p ksp=%p ra=%#"
214 			    PRIxREGISTER " ppl=%#x\n", tf,
215 			    type == T_NMI
216 				? (void*)(uintptr_t)tf->tf_regs[_R_SP]
217 				: tf+1,
218 			    tf->tf_regs[_R_RA], tf->tf_ppl);
219 			sz -= n;
220 			str += n;
221 		}
222 		printf("%s", strbuf);
223 
224 		if (type == T_BUS_ERR_IFETCH || type == T_BUS_ERR_LD_ST)
225 			(void)(*mips_locoresw.lsw_bus_error)(cause);
226 
227 #if defined(DDB)
228 		kdb_trap(type, &tf->tf_registers);
229 		/* XXX force halt XXX */
230 #elif defined(KGDB)
231 		{
232 			extern mips_reg_t kgdb_cause, kgdb_vaddr;
233 			struct reg *regs = &ddb_regs;
234 			kgdb_cause = cause;
235 			kgdb_vaddr = vaddr;
236 
237 			/*
238 			 * init global ddb_regs, used in db_interface.c routines
239 			 * shared between ddb and gdb. Send ddb_regs to gdb so
240 			 * that db_machdep.h macros will work with it, and
241 			 * allow gdb to alter the PC.
242 			 */
243 			db_set_ddb_regs(type, tf);
244 			PC_BREAK_ADVANCE(regs);
245 			if (kgdb_trap(type, regs)) {
246 				tf->tf_regs[TF_EPC] = regs->r_regs[_R_PC];
247 				return;
248 			}
249 		}
250 #else
251 		panic("trap");
252 #endif
253 		/*NOTREACHED*/
254 	case T_TLB_MOD:
255 	case T_TLB_MOD+T_USER: {
256 		const bool user_p = (type & T_USER) || !KERNLAND_P(vaddr);
257 		pmap_t pmap = user_p
258 		    ? p->p_vmspace->vm_map.pmap
259 		    : pmap_kernel();
260 
261 		kpreempt_disable();
262 
263 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, vaddr);
264 		if (!ptep)
265 			panic("%ctlbmod: %#"PRIxVADDR": no pte",
266 			    user_p ? 'u' : 'k', vaddr);
267 		pt_entry_t pte = *ptep;
268 		if (!pte_valid_p(pte)) {
269 			panic("%ctlbmod: %#"PRIxVADDR": invalid pte %#"PRIx32
270 			    " @ ptep %p", user_p ? 'u' : 'k', vaddr,
271 			    pte_value(pte), ptep);
272 		}
273 		if (pte_readonly_p(pte)) {
274 			/* write to read only page */
275 			ftype = VM_PROT_WRITE;
276 			kpreempt_enable();
277 			if (user_p) {
278 				goto pagefault;
279 			} else {
280 				goto kernelfault;
281 			}
282 		}
283 		UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
284 		UVMHIST_LOG(maphist, "%ctlbmod(va=%#lx, pc=%#lx, tf=%p)",
285 		    user_p ? 'u' : 'k', vaddr, pc, tf);
286 		if (!pte_modified_p(pte)) {
287 			pte |= mips_pg_m_bit();
288 #ifdef MULTIPROCESSOR
289 			atomic_or_32(ptep, mips_pg_m_bit());
290 #else
291 			*ptep = pte;
292 #endif
293 		}
294 		// We got a TLB MOD exception so we must have a valid ASID
295 		// and there must be a matching entry in the TLB.  So when
296 		// we try to update it, we better have done it.
297 		KASSERTMSG(pte_valid_p(pte), "%#"PRIx32, pte_value(pte));
298 		vaddr = trunc_page(vaddr);
299 		int ok = pmap_tlb_update_addr(pmap, vaddr, pte, 0);
300 		kpreempt_enable();
301 		if (ok != 1)
302 			printf("pmap_tlb_update_addr(%p,%#"
303 			    PRIxVADDR",%#"PRIxPTE", 0) returned %d",
304 			    pmap, vaddr, pte_value(pte), ok);
305 		paddr_t pa = pte_to_paddr(pte);
306 		KASSERTMSG(uvm_pageismanaged(pa),
307 		    "%#"PRIxVADDR" pa %#"PRIxPADDR, vaddr, pa);
308 		pmap_set_modified(pa);
309 		if (type & T_USER)
310 			userret(l);
311 		UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
312 		return; /* GEN */
313 	}
314 	case T_TLB_LD_MISS:
315 	case T_TLB_ST_MISS:
316 		ftype = (type == T_TLB_LD_MISS) ? VM_PROT_READ : VM_PROT_WRITE;
317 		if (KERNLAND_P(vaddr))
318 			goto kernelfault;
319 		/*
320 		 * It is an error for the kernel to access user space except
321 		 * through the copyin/copyout routines.
322 		 */
323 		if (pcb->pcb_onfault == NULL) {
324 			goto dopanic;
325 		}
326 		/* check for fuswintr() or suswintr() getting a page fault */
327 		if (pcb->pcb_onfault == (void *)fswintrberr) {
328 			tf->tf_regs[_R_PC] = (intptr_t)pcb->pcb_onfault;
329 			return; /* KERN */
330 		}
331 		goto pagefault;
332 	case T_TLB_LD_MISS+T_USER:
333 		ftype = VM_PROT_READ;
334 		goto pagefault;
335 	case T_TLB_ST_MISS+T_USER:
336 		ftype = VM_PROT_WRITE;
337 	pagefault: {
338 		const vaddr_t va = trunc_page(vaddr);
339 		struct vmspace * const vm = p->p_vmspace;
340 		struct vm_map * const map = &vm->vm_map;
341 #ifdef PMAP_FAULTINFO
342 		struct pcb_faultinfo * const pfi = &pcb->pcb_faultinfo;
343 #endif
344 
345 		kpreempt_disable();
346 #ifdef _LP64
347 		/*
348 		 * If the pmap has been activated and we allocated the segtab
349 		 * for the low 4GB, seg0tab may still be NULL.  We can't
350 		 * really fix this in pmap_enter (we can only update the local
351 		 * cpu's cpu_info but not other cpu's) so we need to detect
352 		 * cpu's cpu_info but not other CPUs') so we need to detect
353 		 */
354 		struct cpu_info * const ci = curcpu();
355 		if ((va >> XSEGSHIFT) == 0 &&
356 		    __predict_false(ci->ci_pmap_user_seg0tab == NULL
357 				&& ci->ci_pmap_user_segtab->seg_seg[0] != NULL)) {
358 			ci->ci_pmap_user_seg0tab =
359 			    ci->ci_pmap_user_segtab->seg_seg[0];
360 			kpreempt_enable();
361 			if (type & T_USER) {
362 				userret(l);
363 			}
364 			return; /* GEN */
365 		}
366 #endif
367 		KASSERT(KERNLAND_P(va) || curcpu()->ci_pmap_asid_cur != 0);
368 		pmap_tlb_asid_check();
369 		kpreempt_enable();
370 
371 #ifdef PMAP_FAULTINFO
372 		if (p->p_pid == pfi->pfi_lastpid && va == pfi->pfi_faultaddr) {
373 			if (++pfi->pfi_repeats > 4) {
374 				tlb_asid_t asid = tlb_get_asid();
375 				pt_entry_t *ptep = pfi->pfi_faultpte;
376 				printf("trap: fault #%u (%s/%s) for %#"PRIxVADDR" (%#"PRIxVADDR") at pc %#"PRIxVADDR" curpid=%u/%u ptep@%p=%#"PRIxPTE")\n", pfi->pfi_repeats, trap_names[TRAPTYPE(cause)], trap_names[pfi->pfi_faulttype], va, vaddr, pc, map->pmap->pm_pai[0].pai_asid, asid, ptep, ptep ? pte_value(*ptep) : 0);
377 				if (pfi->pfi_repeats >= 4) {
378 					cpu_Debugger();
379 				} else {
380 					pfi->pfi_faulttype = TRAPTYPE(cause);
381 				}
382 			}
383 		} else {
384 			pfi->pfi_lastpid = p->p_pid;
385 			pfi->pfi_faultaddr = va;
386 			pfi->pfi_repeats = 0;
387 			pfi->pfi_faultpte = NULL;
388 			pfi->pfi_faulttype = TRAPTYPE(cause);
389 		}
390 #endif /* PMAP_FAULTINFO */
391 
392 		onfault = pcb->pcb_onfault;
393 		pcb->pcb_onfault = NULL;
394 		if (p->p_emul->e_fault) {
395 			rv = (*p->p_emul->e_fault)(p, va, ftype);
396 		} else {
397 			rv = uvm_fault(map, va, ftype);
398 		}
399 		pcb->pcb_onfault = onfault;
400 
401 #if defined(VMFAULT_TRACE)
402 		if (!KERNLAND_P(va))
403 			printf(
404 			    "uvm_fault(%p (pmap %p), %#"PRIxVADDR
405 			    " (%"PRIxVADDR"), %d) -> %d at pc %#"PRIxVADDR"\n",
406 			    map, vm->vm_map.pmap, va, vaddr, ftype, rv, pc);
407 #endif
408 		/*
409 		 * If this was a stack access we keep track of the maximum
410 		 * accessed stack size.  Also, if vm_fault gets a protection
411 		 * failure it is due to accessing the stack region outside
412 		 * the current limit and we need to reflect that as an access
413 		 * error.
414 		 */
415 		if ((void *)va >= vm->vm_maxsaddr) {
416 			if (rv == 0)
417 				uvm_grow(p, va);
418 			else if (rv == EACCES)
419 				rv = EFAULT;
420 		}
421 		if (rv == 0) {
422 #ifdef PMAP_FAULTINFO
423 			if (pfi->pfi_repeats == 0) {
424 				pfi->pfi_faultpte =
425 				    pmap_pte_lookup(map->pmap, va);
426 			}
427 			KASSERT(*(pt_entry_t *)pfi->pfi_faultpte);
428 #endif
429 			if (type & T_USER) {
430 				userret(l);
431 			}
432 			return; /* GEN */
433 		}
434 		if ((type & T_USER) == 0)
435 			goto copyfault;
436 		if (rv == ENOMEM) {
437 			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
438 			       p->p_pid, p->p_comm,
439 			       l->l_cred ?
440 			       kauth_cred_geteuid(l->l_cred) : (uid_t) -1);
441 			ksi.ksi_signo = SIGKILL;
442 			ksi.ksi_code = 0;
443 		} else {
444 			if (rv == EACCES) {
445 				ksi.ksi_signo = SIGBUS;
446 				ksi.ksi_code = BUS_OBJERR;
447 			} else {
448 				ksi.ksi_signo = SIGSEGV;
449 				ksi.ksi_code = SEGV_MAPERR;
450 			}
451 		}
452 		ksi.ksi_trap = type & ~T_USER;
453 		ksi.ksi_addr = (void *)vaddr;
454 		break; /* SIGNAL */
455 	}
456 	kernelfault: {
457 		onfault = pcb->pcb_onfault;
458 
459 		pcb->pcb_onfault = NULL;
460 		rv = uvm_fault(kernel_map, trunc_page(vaddr), ftype);
461 		pcb->pcb_onfault = onfault;
462 		if (rv == 0)
463 			return; /* KERN */
464 		goto copyfault;
465 	}
466 	case T_ADDR_ERR_LD:	/* misaligned access */
467 	case T_ADDR_ERR_ST:	/* misaligned access */
468 	case T_BUS_ERR_LD_ST:	/* BERR asserted to CPU */
469 		onfault = pcb->pcb_onfault;
470 		rv = EFAULT;
471 	copyfault:
472 		if (onfault == NULL) {
473 			goto dopanic;
474 		}
475 		tf->tf_regs[_R_PC] = (intptr_t)onfault;
476 		tf->tf_regs[_R_V0] = rv;
477 		return; /* KERN */
478 
479 	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
480 	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
481 	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to CPU */
482 	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to CPU */
483 		ksi.ksi_trap = type & ~T_USER;
484 		ksi.ksi_addr = (void *)vaddr;
485 		if (KERNLAND_P(vaddr)) {
486 			ksi.ksi_signo = SIGSEGV;
487 			ksi.ksi_code = SEGV_MAPERR;
488 		} else {
489 			ksi.ksi_signo = SIGBUS;
490 			if (type == T_BUS_ERR_IFETCH+T_USER
491 			    || type == T_BUS_ERR_LD_ST+T_USER)
492 				ksi.ksi_code = BUS_OBJERR;
493 			else
494 				ksi.ksi_code = BUS_ADRALN;
495 		}
496 		break; /* SIGNAL */
497 
498 	case T_WATCH:
499 	case T_BREAK:
500 #if defined(DDB)
501 		kdb_trap(type, &tf->tf_registers);
502 		return;	/* KERN */
503 #elif defined(KGDB)
504 		{
505 			extern mips_reg_t kgdb_cause, kgdb_vaddr;
506 			struct reg *regs = &ddb_regs;
507 			kgdb_cause = cause;
508 			kgdb_vaddr = vaddr;
509 
510 			/*
511 			 * init global ddb_regs, used in db_interface.c routines
512 			 * shared between ddb and gdb. Send ddb_regs to gdb so
513 			 * that db_machdep.h macros will work with it, and
514 			 * allow gdb to alter the PC.
515 			 */
516 			db_set_ddb_regs(type, &tf->tf_registers);
517 			PC_BREAK_ADVANCE(regs);
518 			if (!kgdb_trap(type, regs))
519 				printf("kgdb: ignored %s\n",
520 				       trap_names[TRAPTYPE(cause)]);
521 			else
522 				tf->tf_regs[_R_PC] = regs->r_regs[_R_PC];
523 
524 			return;
525 		}
526 #else
527 		goto dopanic;
528 #endif
529 	case T_BREAK+T_USER: {
530 		uint32_t instr;
531 
532 		/* compute address of break instruction (EPC + 4 if in a branch delay slot) */
533 		vaddr_t va = pc + (cause & MIPS_CR_BR_DELAY ? sizeof(int) : 0);
534 
535 		/* read break instruction */
536 		instr = ufetch_uint32((void *)va);
537 
538 		if (l->l_md.md_ss_addr != va || instr != MIPS_BREAK_SSTEP) {
539 			ksi.ksi_trap = type & ~T_USER;
540 			ksi.ksi_signo = SIGTRAP;
541 			ksi.ksi_addr = (void *)va;
542 			ksi.ksi_code = TRAP_TRACE;
543 			break;
544 		}
545 		/*
546 		 * Restore original instruction and clear BP
547 		 */
548 		rv = ustore_uint32_isync((void *)va, l->l_md.md_ss_instr);
549 		if (rv < 0) {
550 			vaddr_t sa, ea;
551 			sa = trunc_page(va);
552 			ea = round_page(va + sizeof(int) - 1);
553 			rv = uvm_map_protect(&p->p_vmspace->vm_map,
554 				sa, ea, VM_PROT_ALL, false);
555 			if (rv == 0) {
556 				rv = ustore_uint32_isync((void *)va, l->l_md.md_ss_instr);
557 				(void)uvm_map_protect(&p->p_vmspace->vm_map,
558 				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, false);
559 			}
560 		}
561 		mips_icache_sync_all();		/* XXXJRT -- necessary? */
562 		mips_dcache_wbinv_all();	/* XXXJRT -- necessary? */
563 
564 		if (rv < 0)
565 			printf("Warning: can't restore instruction"
566 			    " at %#"PRIxVADDR": 0x%x\n",
567 			    l->l_md.md_ss_addr, l->l_md.md_ss_instr);
568 		l->l_md.md_ss_addr = 0;
569 		ksi.ksi_trap = type & ~T_USER;
570 		ksi.ksi_signo = SIGTRAP;
571 		ksi.ksi_addr = (void *)va;
572 		ksi.ksi_code = TRAP_BRKPT;
573 		break; /* SIGNAL */
574 	}
575 	case T_DSP+T_USER:
576 #if (MIPS32R2 + MIPS64R2) > 0
577 		if (MIPS_HAS_DSP) {
578 			dsp_load();
579 			userret(l);
580 			return; /* GEN */
581 		}
582 #endif /* (MIPS32R2 + MIPS64R2) > 0 */
583 		/* FALLTHROUGH */
584 	case T_RES_INST+T_USER:
585 	case T_COP_UNUSABLE+T_USER:
586 #if !defined(FPEMUL) && !defined(NOFPU)
587 		if ((cause & MIPS_CR_COP_ERR) == 0x10000000) {
588 			fpu_load();          	/* load FPA */
589 		} else
590 #endif
591 		{
592 			mips_emul_inst(status, cause, pc, utf);
593 		}
594 		userret(l);
595 		return; /* GEN */
596 	case T_FPE+T_USER:
597 #if defined(FPEMUL)
598 		mips_emul_inst(status, cause, pc, utf);
599 #elif !defined(NOFPU)
600 		utf->tf_regs[_R_CAUSE] = cause;
601 		mips_fpu_trap(pc, utf);
602 #endif
603 		userret(l);
604 		return; /* GEN */
605 	case T_OVFLOW+T_USER:
606 	case T_TRAP+T_USER:
607 		ksi.ksi_trap = type & ~T_USER;
608 		ksi.ksi_signo = SIGFPE;
609 		ksi.ksi_addr = (void *)(intptr_t)pc /*utf->tf_regs[_R_PC]*/;
610 		ksi.ksi_code = FPE_FLTOVF; /* XXX */
611 		break; /* SIGNAL */
612 	}
613 	utf->tf_regs[_R_CAUSE] = cause;
614 	utf->tf_regs[_R_BADVADDR] = vaddr;
615 #if defined(DEBUG)
616 	printf("trap: pid %d(%s): sig %d: cause=%#x epc=%#"PRIxREGISTER
617 	    " va=%#"PRIxVADDR"\n",
618 	    p->p_pid, p->p_comm, ksi.ksi_signo, cause,
619 	    utf->tf_regs[_R_PC], vaddr);
620 	printf("registers:\n");
621 	for (size_t i = 0; i < 32; i += 4) {
622 		printf(
623 		    "[%2zu]=%08"PRIxREGISTER" [%2zu]=%08"PRIxREGISTER
624 		    " [%2zu]=%08"PRIxREGISTER" [%2zu]=%08"PRIxREGISTER "\n",
625 		    i+0, utf->tf_regs[i+0], i+1, utf->tf_regs[i+1],
626 		    i+2, utf->tf_regs[i+2], i+3, utf->tf_regs[i+3]);
627 	}
628 #endif
629 	(*p->p_emul->e_trapsignal)(l, &ksi);
630 	if ((type & T_USER) == 0) {
631 #ifdef DDB
632 		Debugger();
633 #endif
634 		panic("trapsignal");
635 	}
636 	userret(l);
637 	return;
638 }
639 
640 /*
641  * Handle asynchronous software traps.
642  * This is called from MachUserIntr() either to deliver signals or
643  * to make an involuntary context switch (preemption).
644  */
645 void
646 ast(void)
647 {
648 	struct lwp * const l = curlwp;
649 	u_int astpending;
650 
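	/*
	 * md_astpending may be posted again while this AST is being
	 * serviced (e.g. by a preemption request), so keep looping until
	 * it remains clear.
	 */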
651 	while ((astpending = l->l_md.md_astpending) != 0) {
652 		//curcpu()->ci_data.cpu_nast++;
653 		l->l_md.md_astpending = 0;
654 
655 #ifdef MULTIPROCESSOR
656 		{
657 			kpreempt_disable();
658 			struct cpu_info * const ci = l->l_cpu;
659 			if (ci->ci_tlb_info->ti_synci_page_bitmap != 0)
660 				pmap_tlb_syncicache_ast(ci);
661 			kpreempt_enable();
662 		}
663 #endif
664 
665 		if (l->l_pflag & LP_OWEUPC) {
666 			l->l_pflag &= ~LP_OWEUPC;
667 			ADDUPROF(l);
668 		}
669 
670 		userret(l);
671 
672 		if (l->l_cpu->ci_want_resched) {
673 			/*
674 			 * We are being preempted.
675 			 */
676 			preempt();
677 		}
678 	}
679 }
680 
681 
682 /* XXX need to rewrite ancient comment XXX
683  * This routine is called by procxmt() to single step one instruction.
684  * We do this by storing a break instruction after the current instruction,
685  * resuming execution, and then restoring the old instruction.
686  */
687 int
688 mips_singlestep(struct lwp *l)
689 {
690 	struct trapframe * const tf = l->l_md.md_utf;
691 	struct proc * const p = l->l_proc;
692 	vaddr_t pc, va;
693 	int rv;
694 
695 	if (l->l_md.md_ss_addr) {
696 		printf("SS %s (%d): breakpoint already set at %#"PRIxVADDR"\n",
697 			p->p_comm, p->p_pid, l->l_md.md_ss_addr);
698 		return EFAULT;
699 	}
700 	pc = (vaddr_t)tf->tf_regs[_R_PC];
701 	if (ufetch_uint32((void *)pc) != 0) { /* not a NOP instruction */
702 		struct pcb * const pcb = lwp_getpcb(l);
703 		va = mips_emul_branch(tf, pc, PCB_FSR(pcb), true);
704 	} else {
705 		va = pc + sizeof(int);
706 	}
707 
708 	/*
709 	 * We can't single-step into a RAS.  Check if we're in
710 	 * a RAS, and set the breakpoint just past it.
711 	 */
712 	if (p->p_raslist != NULL) {
713 		while (ras_lookup(p, (void *)va) != (void *)-1)
714 			va += sizeof(int);
715 	}
716 
717 	l->l_md.md_ss_addr = va;
718 	l->l_md.md_ss_instr = ufetch_uint32((void *)va);
719 	rv = ustore_uint32_isync((void *)va, MIPS_BREAK_SSTEP);
720 	if (rv < 0) {
721 		vaddr_t sa, ea;
722 		sa = trunc_page(va);
723 		ea = round_page(va + sizeof(int) - 1);
724 		rv = uvm_map_protect(&p->p_vmspace->vm_map,
725 		    sa, ea, VM_PROT_ALL, false);
726 		if (rv == 0) {
727 			rv = ustore_uint32_isync((void *)va, MIPS_BREAK_SSTEP);
728 			(void)uvm_map_protect(&p->p_vmspace->vm_map,
729 			    sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, false);
730 		}
731 	}
732 #if 0
733 	printf("SS %s (%d): breakpoint set at %x: %x (pc %x) br %x\n",
734 		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
735 		p->p_md.md_ss_instr, pc, ufetch_uint32((void *)va)); /* XXX */
736 #endif
737 	return 0;
738 }
739 
740 
741 #ifndef DDB_TRACE
742 
743 #if defined(DEBUG) || defined(DDB) || defined(KGDB) || defined(geo)
744 mips_reg_t kdbrpeek(vaddr_t, size_t);
745 
746 int
747 kdbpeek(vaddr_t addr)
748 {
749 	int rc;
750 
751 	if (addr & 3) {
752 		printf("kdbpeek: unaligned address %#"PRIxVADDR"\n", addr);
753 		/* We might have been called from DDB, so do not go there. */
754 		stacktrace();
755 		rc = -1 ;
756 	} else if (addr == 0) {
757 		printf("kdbpeek: NULL\n");
758 		rc = 0xdeadfeed;
759 	} else {
760 		rc = *(int *)addr;
761 	}
762 	return rc;
763 }
764 
765 mips_reg_t
766 kdbrpeek(vaddr_t addr, size_t n)
767 {
768 	mips_reg_t rc;
769 
770 	if (addr & (n - 1)) {
771 		printf("kdbrpeek: unaligned address %#"PRIxVADDR"\n", addr);
772 		/* We might have been called from DDB, so do not go there. */
773 		stacktrace();
774 		rc = -1 ;
775 	} else if (addr == 0) {
776 		printf("kdbrpeek: NULL\n");
777 		rc = 0xdeadfeed;
778 	} else {
779 		if (sizeof(mips_reg_t) == 8 && n == 8)
780 			rc = *(int64_t *)addr;
781 		else
782 			rc = *(int32_t *)addr;
783 	}
784 	return rc;
785 }
786 
787 extern char start[], edata[], verylocore[];
788 #ifdef MIPS1
789 extern char mips1_kern_gen_exception[];
790 extern char mips1_user_gen_exception[];
791 extern char mips1_kern_intr[];
792 extern char mips1_user_intr[];
793 extern char mips1_systemcall[];
794 #endif
795 #ifdef MIPS3
796 extern char mips3_kern_gen_exception[];
797 extern char mips3_user_gen_exception[];
798 extern char mips3_kern_intr[];
799 extern char mips3_user_intr[];
800 extern char mips3_systemcall[];
801 #endif
802 #ifdef MIPS32
803 extern char mips32_kern_gen_exception[];
804 extern char mips32_user_gen_exception[];
805 extern char mips32_kern_intr[];
806 extern char mips32_user_intr[];
807 extern char mips32_systemcall[];
808 #endif
809 #ifdef MIPS32R2
810 extern char mips32r2_kern_gen_exception[];
811 extern char mips32r2_user_gen_exception[];
812 extern char mips32r2_kern_intr[];
813 extern char mips32r2_user_intr[];
814 extern char mips32r2_systemcall[];
815 #endif
816 #ifdef MIPS64
817 extern char mips64_kern_gen_exception[];
818 extern char mips64_user_gen_exception[];
819 extern char mips64_kern_intr[];
820 extern char mips64_user_intr[];
821 extern char mips64_systemcall[];
822 #endif
823 #ifdef MIPS64R2
824 extern char mips64r2_kern_gen_exception[];
825 extern char mips64r2_user_gen_exception[];
826 extern char mips64r2_kern_intr[];
827 extern char mips64r2_user_intr[];
828 extern char mips64r2_systemcall[];
829 #endif
830 
831 int main(void *);	/* XXX */
832 
833 /*
834  *  stack trace code, also useful to DDB one day
835  */
836 
837 /* forward */
838 const char *fn_name(vaddr_t addr);
839 void stacktrace_subr(mips_reg_t, mips_reg_t, mips_reg_t, mips_reg_t,
840 	vaddr_t, vaddr_t, vaddr_t, vaddr_t, void (*)(const char*, ...));
841 
842 #define	MIPS_JR_RA	0x03e00008	/* instruction code for jr ra */
843 #define	MIPS_JR_K0	0x03400008	/* instruction code for jr k0 */
844 #define	MIPS_ERET	0x42000018	/* instruction code for eret */
845 
846 /*
847  * Do a stack backtrace.
848  * (*printfn)()  prints the output to either the system log,
849  * the console, or both.
850  */
851 void
852 stacktrace_subr(mips_reg_t a0, mips_reg_t a1, mips_reg_t a2, mips_reg_t a3,
853     vaddr_t pc, vaddr_t sp, vaddr_t fp, vaddr_t ra,
854     void (*printfn)(const char*, ...))
855 {
856 	vaddr_t va, subr;
857 	unsigned instr, mask;
858 	InstFmt i;
859 	int more, stksize;
860 	unsigned int frames =  0;
861 	int foundframesize = 0;
862 	mips_reg_t regs[32] = {
863 		[_R_ZERO] = 0,
864 		[_R_A0] = a0, [_R_A1] = a1, [_R_A2] = a2, [_R_A3] = a3,
865 		[_R_RA] = ra,
866 	};
867 #ifdef DDB
868 	db_expr_t diff;
869 	db_sym_t sym;
870 #endif
871 
872 /* Jump here when done with a frame, to start a new one */
873 loop:
874 	stksize = 0;
875 	subr = 0;
876 	mask = 1;
877 	if (frames++ > 100) {
878 		(*printfn)("\nstackframe count exceeded\n");
879 		/* return breaks stackframe-size heuristics with gcc -O2 */
880 		goto finish;	/*XXX*/
881 	}
882 
883 	/* check for bad SP: could foul up next frame */
884 	if ((sp & (sizeof(sp)-1)) || (intptr_t)sp >= 0) {
885 		(*printfn)("SP %#"PRIxVADDR": not in kernel\n", sp);
886 		ra = 0;
887 		subr = 0;
888 		goto done;
889 	}
890 
891 	/* Check for bad PC */
892 	if (pc & 3 || (intptr_t)pc >= 0 || (intptr_t)pc >= (intptr_t)edata) {
893 		(*printfn)("PC %#"PRIxVADDR": not in kernel space\n", pc);
894 		ra = 0;
895 		goto done;
896 	}
897 
898 #ifdef DDB
899 	/*
900 	 * Check the kernel symbol table to see the beginning of
901 	 * the current subroutine.
902 	 */
903 	diff = 0;
904 	sym = db_search_symbol(pc, DB_STGY_ANY, &diff);
905 	if (sym != DB_SYM_NULL && diff == 0) {
906 		/* check func(foo) __attribute__((__noreturn__)) case */
907 		instr = kdbpeek(pc - 2 * sizeof(int));
908 		i.word = instr;
909 		if (i.JType.op == OP_JAL) {
910 			sym = db_search_symbol(pc - sizeof(int),
911 			    DB_STGY_ANY, &diff);
912 			if (sym != DB_SYM_NULL && diff != 0)
913 				diff += sizeof(int);
914 		}
915 	}
916 	if (sym == DB_SYM_NULL) {
917 		ra = 0;
918 		goto done;
919 	}
920 	va = pc - diff;
921 #else
922 	/*
923 	 * Find the beginning of the current subroutine by scanning backwards
924 	 * from the current PC for the end of the previous subroutine.
925 	 *
926 	 * XXX This won't work well because nowadays gcc is so aggressive
927 	 *     as to reorder instruction blocks for branch-predict.
928 	 *     (i.e. 'jr ra' wouldn't indicate the end of subroutine)
929 	 */
930 	va = pc;
931 	do {
932 		va -= sizeof(int);
933 		if (va <= (vaddr_t)verylocore)
934 			goto finish;
935 		instr = kdbpeek(va);
936 		if (instr == MIPS_ERET)
937 			goto mips3_eret;
938 	} while (instr != MIPS_JR_RA && instr != MIPS_JR_K0);
939 	/* skip back over branch & delay slot */
940 	va += sizeof(int);
941 mips3_eret:
942 	va += sizeof(int);
943 	/* skip over nulls which might separate .o files */
944 	while ((instr = kdbpeek(va)) == 0)
945 		va += sizeof(int);
946 #endif
947 	subr = va;
948 
949 	/* scan forwards to find stack size and any saved registers */
950 	stksize = 0;
951 	more = 3;
952 	mask &= 0x40ff0001;	/* if s0-s8 are valid, leave them as valid */
953 	foundframesize = 0;
954 	for (va = subr; more; va += sizeof(int),
955 			      more = (more == 3) ? 3 : more - 1) {
956 		/* stop if hit our current position */
957 		if (va >= pc)
958 			break;
959 		instr = kdbpeek(va);
960 		i.word = instr;
961 		switch (i.JType.op) {
962 		case OP_SPECIAL:
963 			switch (i.RType.func) {
964 			case OP_JR:
965 			case OP_JALR:
966 				more = 2; /* stop after next instruction */
967 				break;
968 
969 			case OP_ADD:
970 			case OP_ADDU:
971 			case OP_DADD:
972 			case OP_DADDU:
973 				if (!(mask & (1 << i.RType.rd))
974 				    || !(mask & (1 << i.RType.rt)))
975 					break;
976 				if (i.RType.rd != _R_ZERO)
977 					break;
978 				mask |= (1 << i.RType.rs);
979 				regs[i.RType.rs] = regs[i.RType.rt];
980 				if (i.RType.func >= OP_DADD)
981 					break;
982 				regs[i.RType.rs] = (int32_t)regs[i.RType.rs];
983 				break;
984 
985 			case OP_SYSCALL:
986 			case OP_BREAK:
987 				more = 1; /* stop now */
988 				break;
989 			}
990 			break;
991 
992 		case OP_REGIMM:
993 		case OP_J:
994 		case OP_JAL:
995 		case OP_BEQ:
996 		case OP_BNE:
997 		case OP_BLEZ:
998 		case OP_BGTZ:
999 			more = 2; /* stop after next instruction */
1000 			break;
1001 
1002 		case OP_COP0:
1003 		case OP_COP1:
1004 		case OP_COP2:
1005 		case OP_COP3:
1006 			switch (i.RType.rs) {
1007 			case OP_BCx:
1008 			case OP_BCy:
1009 				more = 2; /* stop after next instruction */
1010 			};
1011 			break;
1012 
1013 		case OP_SW:
1014 #if !defined(__mips_o32)
1015 		case OP_SD:
1016 #endif
1017 		{
1018 			size_t size = (i.JType.op == OP_SW) ? 4 : 8;
1019 
1020 			/* look for saved registers on the stack */
1021 			if (i.IType.rs != _R_SP)
1022 				break;
1023 			switch (i.IType.rt) {
1024 			case _R_A0: /* a0 */
1025 			case _R_A1: /* a1 */
1026 			case _R_A2: /* a2 */
1027 			case _R_A3: /* a3 */
1028 			case _R_S0: /* s0 */
1029 			case _R_S1: /* s1 */
1030 			case _R_S2: /* s2 */
1031 			case _R_S3: /* s3 */
1032 			case _R_S4: /* s4 */
1033 			case _R_S5: /* s5 */
1034 			case _R_S6: /* s6 */
1035 			case _R_S7: /* s7 */
1036 			case _R_S8: /* s8 */
1037 			case _R_RA: /* ra */
1038 				regs[i.IType.rt] =
1039 				    kdbrpeek(sp + (int16_t)i.IType.imm, size);
1040 				mask |= (1 << i.IType.rt);
1041 				break;
1042 			}
1043 			break;
1044 		}
1045 
1046 		case OP_ADDI:
1047 		case OP_ADDIU:
1048 #if !defined(__mips_o32)
1049 		case OP_DADDI:
1050 		case OP_DADDIU:
1051 #endif
1052 			/* look for stack pointer adjustment */
1053 			if (i.IType.rs != _R_SP || i.IType.rt != _R_SP)
1054 				break;
1055 			/* don't count pops for mcount */
1056 			if (!foundframesize) {
1057 				stksize = - ((short)i.IType.imm);
1058 				foundframesize = 1;
1059 			}
1060 			break;
1061 		}
1062 	}
1063 done:
1064 	if (mask & (1 << _R_RA))
1065 		ra = regs[_R_RA];
1066 	(*printfn)("%#"PRIxVADDR": %s+%"PRIxVADDR" (%"PRIxREGISTER",%"PRIxREGISTER",%"PRIxREGISTER",%"PRIxREGISTER") ra %"PRIxVADDR" sz %d\n",
1067 		sp, fn_name(subr), pc - subr,
1068 		regs[_R_A0], regs[_R_A1], regs[_R_A2], regs[_R_A3],
1069 		ra, stksize);
1070 
1071 	if (ra) {
1072 		if (pc == ra && stksize == 0)
1073 			(*printfn)("stacktrace: loop!\n");
1074 		else {
1075 			pc = ra;
1076 			sp += stksize;
1077 			ra = 0;
1078 			goto loop;
1079 		}
1080 	} else {
1081 finish:
1082 		(*printfn)("User-level: pid %d.%d\n",
1083 		    curlwp->l_proc->p_pid, curlwp->l_lid);
1084 	}
1085 }
1086 
1087 /*
1088  * Functions ``special'' enough to print by name
1089  */
1090 #define Name(_fn)  { (void*)_fn, # _fn }
1091 const static struct { void *addr; const char *name;} names[] = {
1092 	Name(stacktrace),
1093 	Name(stacktrace_subr),
1094 	Name(main),
1095 	Name(trap),
1096 
1097 #ifdef MIPS1	/*  r2000 family  (mips-I CPU) */
1098 	Name(mips1_kern_gen_exception),
1099 	Name(mips1_user_gen_exception),
1100 	Name(mips1_systemcall),
1101 	Name(mips1_kern_intr),
1102 	Name(mips1_user_intr),
1103 #endif	/* MIPS1 */
1104 
1105 #if defined(MIPS3)			/* r4000 family (mips-III CPU) */
1106 	Name(mips3_kern_gen_exception),
1107 	Name(mips3_user_gen_exception),
1108 	Name(mips3_systemcall),
1109 	Name(mips3_kern_intr),
1110 	Name(mips3_user_intr),
1111 #endif	/* MIPS3 */
1112 
1113 #if defined(MIPS32)			/* MIPS32 family (mips-III CPU) */
1114 	Name(mips32_kern_gen_exception),
1115 	Name(mips32_user_gen_exception),
1116 	Name(mips32_systemcall),
1117 	Name(mips32_kern_intr),
1118 	Name(mips32_user_intr),
1119 #endif	/* MIPS32 */
1120 
1121 #if defined(MIPS32R2)			/* MIPS32R2 family (mips-III CPU) */
1122 	Name(mips32r2_kern_gen_exception),
1123 	Name(mips32r2_user_gen_exception),
1124 	Name(mips32r2_systemcall),
1125 	Name(mips32r2_kern_intr),
1126 	Name(mips32r2_user_intr),
1127 #endif	/* MIPS32R2 */
1128 
1129 #if defined(MIPS64)			/* MIPS64 family (mips-III CPU) */
1130 	Name(mips64_kern_gen_exception),
1131 	Name(mips64_user_gen_exception),
1132 	Name(mips64_systemcall),
1133 	Name(mips64_kern_intr),
1134 	Name(mips64_user_intr),
1135 #endif	/* MIPS64 */
1136 
1137 #if defined(MIPS64R2)			/* MIPS64R2 family (mips-III CPU) */
1138 	Name(mips64r2_kern_gen_exception),
1139 	Name(mips64r2_user_gen_exception),
1140 	Name(mips64r2_systemcall),
1141 	Name(mips64r2_kern_intr),
1142 	Name(mips64r2_user_intr),
1143 #endif	/* MIPS64R2 */
1144 
1145 	Name(cpu_idle),
1146 	Name(cpu_switchto),
1147 	{0, 0}
1148 };
1149 
1150 /*
1151  * Map a function address to a string name, if known; or a hex string.
1152  */
1153 const char *
1154 fn_name(vaddr_t addr)
1155 {
1156 	static char buf[17];
1157 	int i = 0;
1158 #ifdef DDB
1159 	db_expr_t diff;
1160 	db_sym_t sym;
1161 	const char *symname;
1162 #endif
1163 
1164 #ifdef DDB
1165 	diff = 0;
1166 	symname = NULL;
1167 	sym = db_search_symbol(addr, DB_STGY_ANY, &diff);
1168 	db_symbol_values(sym, &symname, 0);
1169 	if (symname && diff == 0)
1170 		return (symname);
1171 #endif
1172 	for (i = 0; names[i].name; i++)
1173 		if (names[i].addr == (void*)addr)
1174 			return (names[i].name);
1175 	snprintf(buf, sizeof(buf), "%#"PRIxVADDR, addr);
1176 	return (buf);
1177 }
1178 
1179 #endif /* DEBUG */
1180 #endif /* DDB_TRACE */
1181