/*	$NetBSD: trap.c,v 1.21 2023/05/07 12:41:49 skrll Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#define	__PMAP_PRIVATE
#define	__UFETCHSTORE_PRIVATE

__RCSID("$NetBSD: trap.c,v 1.21 2023/05/07 12:41:49 skrll Exp $");

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/kauth.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/siginfo.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#include <machine/locore.h>
#include <machine/machdep.h>
#include <machine/db_machdep.h>

#define	MACHINE_ECALL_TRAP_MASK	(__BIT(CAUSE_MACHINE_ECALL))

#define	SUPERVISOR_ECALL_TRAP_MASK					\
				(__BIT(CAUSE_SUPERVISOR_ECALL))

#define	USER_ECALL_TRAP_MASK	(__BIT(CAUSE_USER_ECALL))

#define	SYSCALL_TRAP_MASK	(__BIT(CAUSE_SYSCALL))

#define	BREAKPOINT_TRAP_MASK	(__BIT(CAUSE_BREAKPOINT))

#define	INSTRUCTION_TRAP_MASK	(__BIT(CAUSE_ILLEGAL_INSTRUCTION))

#define	FAULT_TRAP_MASK		(__BIT(CAUSE_FETCH_ACCESS)		\
				|__BIT(CAUSE_LOAD_ACCESS)		\
				|__BIT(CAUSE_STORE_ACCESS)		\
				|__BIT(CAUSE_FETCH_PAGE_FAULT)		\
				|__BIT(CAUSE_LOAD_PAGE_FAULT)		\
				|__BIT(CAUSE_STORE_PAGE_FAULT))

#define	MISALIGNED_TRAP_MASK	(__BIT(CAUSE_FETCH_MISALIGNED)		\
				|__BIT(CAUSE_LOAD_MISALIGNED)		\
				|__BIT(CAUSE_STORE_MISALIGNED))

static const char * const causenames[] = {
	[CAUSE_FETCH_MISALIGNED] = "misaligned fetch",
	[CAUSE_LOAD_MISALIGNED] = "misaligned load",
	[CAUSE_STORE_MISALIGNED] = "misaligned store",
	[CAUSE_FETCH_ACCESS] = "fetch",
	[CAUSE_LOAD_ACCESS] = "load",
	[CAUSE_STORE_ACCESS] = "store",
	[CAUSE_ILLEGAL_INSTRUCTION] = "illegal instruction",
	[CAUSE_BREAKPOINT] = "breakpoint",
	[CAUSE_SYSCALL] = "syscall",
	[CAUSE_FETCH_PAGE_FAULT] = "instruction page fault",
	[CAUSE_LOAD_PAGE_FAULT] = "load page fault",
	[CAUSE_STORE_PAGE_FAULT] = "store page fault",
};

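/*
 * cpu_jump_onfault --
 *	Redirect a faulting kernel access to its registered onfault handler:
 *	restore the callee-saved registers, stack pointer and return address
 *	recorded in the faultbuf by cpu_set_onfault(), place the recorded
 *	error value in a0, and resume execution at the saved return address.
 */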
void
cpu_jump_onfault(struct trapframe *tf, const struct faultbuf *fb)
{
	tf->tf_a0 = fb->fb_reg[FB_A0];
	tf->tf_ra = fb->fb_reg[FB_RA];
	tf->tf_s0 = fb->fb_reg[FB_S0];
	tf->tf_s1 = fb->fb_reg[FB_S1];
	tf->tf_s2 = fb->fb_reg[FB_S2];
	tf->tf_s3 = fb->fb_reg[FB_S3];
	tf->tf_s4 = fb->fb_reg[FB_S4];
	tf->tf_s5 = fb->fb_reg[FB_S5];
	tf->tf_s6 = fb->fb_reg[FB_S6];
	tf->tf_s7 = fb->fb_reg[FB_S7];
	tf->tf_s8 = fb->fb_reg[FB_S8];
	tf->tf_s9 = fb->fb_reg[FB_S9];
	tf->tf_s10 = fb->fb_reg[FB_S10];
	tf->tf_s11 = fb->fb_reg[FB_S11];
	tf->tf_sp = fb->fb_reg[FB_SP];
	tf->tf_pc = fb->fb_reg[FB_RA];
}


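/*
 * copyin --
 *	Copy len bytes from the user address uaddr into the kernel buffer
 *	kaddr.  User memory is made accessible by setting SR_SUM in sstatus,
 *	and an onfault handler turns a faulting user access into EFAULT
 *	instead of a fatal kernel trap.
 */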
int
copyin(const void *uaddr, void *kaddr, size_t len)
{
	struct faultbuf fb;
	int error;

	if (__predict_false(len == 0)) {
		return 0;
	}

	// XXXNH cf. VM_MIN_ADDRESS and user_va0_disable
	if (uaddr == NULL)
		return EFAULT;

	const vaddr_t uva = (vaddr_t)uaddr;
	if (uva > VM_MAXUSER_ADDRESS - len)
		return EFAULT;

	csr_sstatus_set(SR_SUM);
	if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) {
		memcpy(kaddr, uaddr, len);
		cpu_unset_onfault();
	}
	csr_sstatus_clear(SR_SUM);

	return error;
}

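/*
 * copyout --
 *	Copy len bytes from the kernel buffer kaddr to the user address
 *	uaddr, with the same SR_SUM/onfault protection as copyin().
 */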
int
copyout(const void *kaddr, void *uaddr, size_t len)
{
	struct faultbuf fb;
	int error;

	if (__predict_false(len == 0)) {
		return 0;
	}

	// XXXNH cf. VM_MIN_ADDRESS and user_va0_disable
	if (uaddr == NULL)
		return EFAULT;

	const vaddr_t uva = (vaddr_t)uaddr;
	if (uva > VM_MAXUSER_ADDRESS - len)
		return EFAULT;

	csr_sstatus_set(SR_SUM);
	if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) {
		memcpy(uaddr, kaddr, len);
		cpu_unset_onfault();
	}
	csr_sstatus_clear(SR_SUM);

	return error;
}

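/*
 * kcopy --
 *	Copy len bytes between two kernel addresses, returning EFAULT via
 *	the onfault handler instead of panicking if either address faults.
 */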
int
kcopy(const void *kfaddr, void *kdaddr, size_t len)
{
	struct faultbuf fb;
	int error;

	if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) {
		memcpy(kdaddr, kfaddr, len);
		cpu_unset_onfault();
	}

	return error;
}

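/*
 * copyinstr --
 *	Copy a NUL-terminated string of at most len bytes from user space
 *	into the kernel.  On success *done (if not NULL) is set to the
 *	number of bytes copied including the terminating NUL; if the string
 *	does not fit in len bytes, ENAMETOOLONG is returned.
 */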
int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{
	struct faultbuf fb;
	size_t retlen;
	int error;

	if (__predict_false(len == 0)) {
		return 0;
	}

	if (__predict_false(uaddr == NULL))
		return EFAULT;
	/*
	 * Can only check if starting user address is out of range here.
	 * The string may end before uva + len.
	 */
	const vaddr_t uva = (vaddr_t)uaddr;
	if (uva > VM_MAXUSER_ADDRESS)
		return EFAULT;

	csr_sstatus_set(SR_SUM);
	if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) {
		retlen = strlcpy(kaddr, uaddr, len);
		cpu_unset_onfault();
		if (retlen >= len) {
			error = ENAMETOOLONG;
		} else if (done != NULL) {
			*done = retlen + 1;
		}
	}
	csr_sstatus_clear(SR_SUM);

	return error;
}

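/*
 * copyoutstr --
 *	Copy a NUL-terminated string of at most len bytes from the kernel
 *	out to user space; *done (if not NULL) is set to the number of
 *	bytes copied including the terminating NUL.
 */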
int
copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
{
	struct faultbuf fb;
	size_t retlen;
	int error;

	if (__predict_false(len == 0)) {
		return 0;
	}

	if (__predict_false(uaddr == NULL))
		return EFAULT;
	/*
	 * Can only check if starting user address is out of range here.
	 * The string may end before uva + len.
	 */
	const vaddr_t uva = (vaddr_t)uaddr;
	if (uva > VM_MAXUSER_ADDRESS)
		return EFAULT;

	csr_sstatus_set(SR_SUM);
	if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) {
		retlen = strlcpy(uaddr, kaddr, len);
		cpu_unset_onfault();
		if (retlen >= len) {
			error = ENAMETOOLONG;
		} else if (done != NULL) {
			*done = retlen + 1;
		}
	}
	csr_sstatus_clear(SR_SUM);

	return error;
}

static const char *
cause_name(register_t cause)
{
	if (CAUSE_INTERRUPT_P(cause))
		return "interrupt";
	const char *name = "(unk)";
	if (cause < __arraycount(causenames) && causenames[cause] != NULL)
		name = causenames[cause];

	return name;
}

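/*
 * dump_trapframe --
 *	Print the cause, status, pc, trap value and the 31 general-purpose
 *	registers of a trapframe using the supplied printf-like function.
 */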
void
dump_trapframe(const struct trapframe *tf, void (*pr)(const char *, ...))
{
	const char *name = cause_name(tf->tf_cause);
	static const char *regname[] = {
	           "ra",  "sp",  "gp",	//  x0,  x1,  x2,  x3,
	    "tp",  "t0",  "t1",  "t2",	//  x4,  x5,  x6,  x7,
	    "s0",  "s1",  "a0",  "a1",	//  x8,  x9, x10, x11,
	    "a2",  "a3",  "a4",  "a5",	// x12, x13, x14, x15,
	    "a6",  "a7",  "s2",  "s3",	// x16, x17, x18, x19,
	    "s4",  "s5",  "s6",  "s7",	// x20, x21, x22, x23,
	    "s8",  "s9", "s10", "s11",	// x24, x25, x26, x27,
	    "t3",  "t4",  "t5",  "t6",	// x28, x29, x30, x31,
	};

	(*pr)("Trapframe @ %p "
	    "(cause=%d (%s), status=%#x, pc=%#18" PRIxREGISTER
	    ", va=%#" PRIxREGISTER "):\n",
	    tf, tf->tf_cause, name, tf->tf_sr, tf->tf_pc, tf->tf_tval);

	(*pr)("                        ");
	for (unsigned reg = 1; reg < 32; reg++) {
		(*pr)("%-3s=%#18" PRIxREGISTER "  ",
		    regname[reg - 1],
		    tf->tf_regs.r_reg[reg - 1]);
		if (reg % 4 == 3)
			(*pr)("\n");
	}
}

static inline void
trap_ksi_init(ksiginfo_t *ksi, int signo, int code, vaddr_t addr,
    register_t cause)
{
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = signo;
	ksi->ksi_code = code;
	ksi->ksi_addr = (void *)addr;
	ksi->ksi_trap = cause;
}

static void
cpu_trapsignal(struct trapframe *tf, ksiginfo_t *ksi)
{
	if (cpu_printfataltraps) {
		dump_trapframe(tf, printf);
	}
	(*curlwp->l_proc->p_emul->e_trapsignal)(curlwp, ksi);
}

static inline vm_prot_t
get_faulttype(register_t cause)
{
	if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_LOAD_PAGE_FAULT)
		return VM_PROT_READ;
	if (cause == CAUSE_STORE_ACCESS || cause == CAUSE_STORE_PAGE_FAULT)
		return VM_PROT_WRITE;
	KASSERT(cause == CAUSE_FETCH_ACCESS || cause == CAUSE_FETCH_PAGE_FAULT);
	return VM_PROT_EXECUTE;
}

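/*
 * trap_pagefault_fixup --
 *	Fast path for referenced/modified tracking: if the faulting address
 *	already has a valid PTE and the fault only indicates that the page
 *	has now been referenced or written, set PTE_A/PTE_D, record the page
 *	attributes and update the TLB entry instead of calling uvm_fault().
 *	Returns true if the fault was handled here.
 */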
static bool
trap_pagefault_fixup(struct trapframe *tf, struct pmap *pmap, register_t cause,
    intptr_t addr)
{
	pt_entry_t * const ptep = pmap_pte_lookup(pmap, addr);
	struct vm_page *pg;

	if (ptep == NULL)
		return false;

	pt_entry_t opte = *ptep;
	if (!pte_valid_p(opte))
		return false;

	pt_entry_t npte;
	u_int attr;
	do {
		/* TODO: PTE_G is just the kernel PTE, but all pages
		 * can fault for CAUSE_LOAD_PAGE_FAULT and
		 * CAUSE_STORE_PAGE_FAULT...*/
		/* if ((opte & ~PTE_G) == 0) */
		/* 	return false; */

		pg = PHYS_TO_VM_PAGE(pte_to_paddr(opte));
		if (pg == NULL)
			return false;

		attr = 0;
		npte = opte;

		switch (cause) {
		case CAUSE_STORE_ACCESS:
			if ((npte & PTE_W) != 0) {
				npte |= PTE_A | PTE_D;
				attr |= VM_PAGEMD_MODIFIED;
			}
			break;
		case CAUSE_STORE_PAGE_FAULT:
			if ((npte & PTE_D) == 0) {
				npte |= PTE_A | PTE_D;
				attr |= VM_PAGEMD_REFERENCED | VM_PAGEMD_MODIFIED;
			}
			break;
		case CAUSE_FETCH_ACCESS:
		case CAUSE_FETCH_PAGE_FAULT:
#if 0
			if ((npte & PTE_NX) != 0) {
				npte &= ~PTE_NX;
				attr |= VM_PAGEMD_EXECPAGE;
			}
#endif
			break;
		default:
			panic("%s: Unhandled cause!", __func__);
		}
		if (attr == 0)
			return false;
	} while (opte != atomic_cas_pte(ptep, opte, npte));

	pmap_page_set_attributes(VM_PAGE_TO_MD(pg), attr);
	pmap_tlb_update_addr(pmap, addr, npte, 0);

	if (attr & VM_PAGEMD_EXECPAGE)
		pmap_md_page_syncicache(VM_PAGE_TO_MD(pg),
		    curcpu()->ci_kcpuset);

	return true;
}

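/*
 * trap_pagefault --
 *	Handle access and page faults.  User addresses are first checked
 *	against the user address space limits, then the referenced/modified
 *	fixup is tried, and finally the fault is handed to uvm_fault() on
 *	the process map (user) or kernel_map (kernel).  Returns false when a
 *	signal must be delivered (ksi has then been initialised) or when a
 *	kernel fault cannot be resolved and has no onfault handler.
 */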
static bool
trap_pagefault(struct trapframe *tf, register_t epc, register_t status,
    register_t cause, register_t tval, bool usertrap_p, ksiginfo_t *ksi)
{
	struct proc * const p = curlwp->l_proc;
	const intptr_t addr = trunc_page(tval);

	if (__predict_false(usertrap_p
	    && (false
		// Make sure this address is not trying to access kernel space.
		|| addr < 0
#ifdef _LP64
		// If this is a process using a 32-bit address space, make
		// sure the address is a signed 32-bit number.
		|| ((p->p_flag & PK_32) && (int32_t) addr != addr)
#endif
		|| false))) {
		trap_ksi_init(ksi, SIGSEGV, SEGV_MAPERR, addr, cause);
		return false;
	}

	struct vm_map * const map = (addr >= 0 ?
	    &p->p_vmspace->vm_map : kernel_map);

	// See if this fault is for reference/modified/execpage tracking
	if (trap_pagefault_fixup(tf, map->pmap, cause, addr))
		return true;

#ifdef PMAP_FAULTINFO
	struct pcb * const pcb = lwp_getpcb(curlwp);
	struct pcb_faultinfo * const pfi = &pcb->pcb_faultinfo;

	if (p->p_pid == pfi->pfi_lastpid && addr == pfi->pfi_faultaddr) {
		if (++pfi->pfi_repeats > 4) {
			tlb_asid_t asid = tlb_get_asid();
			pt_entry_t *ptep = pfi->pfi_faultptep;
			printf("%s: fault #%u (%s) for %#" PRIxVADDR
			    "(%#"PRIxVADDR") at pc %#"PRIxVADDR" curpid=%u/%u "
			    "ptep@%p=%#"PRIxPTE")\n", __func__,
			    pfi->pfi_repeats, cause_name(tf->tf_cause),
			    tval, addr, epc, map->pmap->pm_pai[0].pai_asid,
			    asid, ptep, ptep ? pte_value(*ptep) : 0);
			if (pfi->pfi_repeats >= 4) {
				cpu_Debugger();
			} else {
				pfi->pfi_cause = cause;
			}
		}
	} else {
		pfi->pfi_lastpid = p->p_pid;
		pfi->pfi_faultaddr = addr;
		pfi->pfi_repeats = 0;
		pfi->pfi_faultptep = NULL;
		pfi->pfi_cause = cause;
	}
#endif /* PMAP_FAULTINFO */

	const vm_prot_t ftype = get_faulttype(cause);

	if (usertrap_p) {
		int error = uvm_fault(&p->p_vmspace->vm_map, addr, ftype);
		if (error) {
			int signo = SIGSEGV;
			int code = SEGV_MAPERR;

			switch (error) {
			case ENOMEM: {
				struct lwp * const l = curlwp;
				printf("UVM: pid %d (%s), uid %d killed: "
				    "out of swap\n",
				    l->l_proc->p_pid, l->l_proc->p_comm,
				    l->l_cred ?
					kauth_cred_geteuid(l->l_cred) : -1);
				signo = SIGKILL;
				code = 0;
				break;
			    }
			case EACCES:
				KASSERT(signo == SIGSEGV);
				code = SEGV_ACCERR;
				break;
			case EINVAL:
				signo = SIGBUS;
				code = BUS_ADRERR;
				break;
			}

			trap_ksi_init(ksi, signo, code, (intptr_t)tval, cause);
			return false;
		}
		uvm_grow(p, addr);

		return true;
	}

	// Page faults are not allowed while dealing with interrupts
	if (cpu_intr_p())
		return false;

	struct faultbuf * const fb = cpu_disable_onfault();
	int error = uvm_fault(map, addr, ftype);
	cpu_enable_onfault(fb);

	if (error == 0) {
		if (map != kernel_map) {
			uvm_grow(p, addr);
		}
		return true;
	}

	if (fb == NULL) {
		return false;
	}

	cpu_jump_onfault(tf, fb);
	return true;
}

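/*
 * trap_instruction --
 *	Handle illegal instruction traps.  A user trap with the FPU marked
 *	off in sstatus is resolved by lazily loading the FPU state and
 *	retrying; any other user trap prepares a SIGILL.
 */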
static bool
trap_instruction(struct trapframe *tf, register_t epc, register_t status,
    register_t cause, register_t tval, bool usertrap_p, ksiginfo_t *ksi)
{
	if (usertrap_p) {
		if (__SHIFTOUT(tf->tf_sr, SR_FS) == SR_FS_OFF) {
			fpu_load();
			return true;
		}

		trap_ksi_init(ksi, SIGILL, ILL_ILLOPC,
		    (intptr_t)tval, cause);
	}
	return false;
}

static bool
trap_misalignment(struct trapframe *tf, register_t epc, register_t status,
    register_t cause, register_t tval, bool usertrap_p, ksiginfo_t *ksi)
{
	if (usertrap_p) {
		trap_ksi_init(ksi, SIGBUS, BUS_ADRALN,
		    (intptr_t)tval, cause);
	}
	return false;
}

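/*
 * cpu_trap --
 *	C entry point for synchronous (non-interrupt) exceptions.  Interrupts
 *	are re-enabled, the cause code is dispatched to the page fault,
 *	illegal instruction, misalignment or breakpoint handlers, any pending
 *	signal is delivered for user traps, and unresolved kernel traps
 *	panic.
 */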
void
cpu_trap(struct trapframe *tf, register_t epc, register_t status,
    register_t cause, register_t tval)
{
	const register_t code = CAUSE_CODE(cause);
	const register_t fault_mask = __BIT(code);
	const intptr_t addr = tval;
	const bool usertrap_p = (status & SR_SPP) == 0;
	bool ok = true;
	ksiginfo_t ksi;

	KASSERT(!CAUSE_INTERRUPT_P(cause));
	KASSERT(__SHIFTOUT(tf->tf_sr, SR_SIE) == 0);

	/* We can allow interrupts now */
	csr_sstatus_set(SR_SIE);

	if (__predict_true(fault_mask & FAULT_TRAP_MASK)) {
#ifndef _LP64
#if 0
		// This fault may be because the kernel's page table got a new
		// page table page and this pmap's page table doesn't know
		// about it.  See
		struct pmap * const pmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
		if ((intptr_t) addr < 0
		    && pmap != pmap_kernel()
		    && pmap_pdetab_fixup(pmap, addr)) {
			return;
		}
#endif
#endif
		ok = trap_pagefault(tf, epc, status, cause, addr,
		    usertrap_p, &ksi);
	} else if (fault_mask & INSTRUCTION_TRAP_MASK) {
		ok = trap_instruction(tf, epc, status, cause, addr,
		    usertrap_p, &ksi);
	} else if (fault_mask & SYSCALL_TRAP_MASK) {
		panic("cpu_exception_handler failure");
	} else if (fault_mask & MISALIGNED_TRAP_MASK) {
		ok = trap_misalignment(tf, epc, status, cause, addr,
		    usertrap_p, &ksi);
	} else if (fault_mask & BREAKPOINT_TRAP_MASK) {
		if (!usertrap_p) {
			dump_trapframe(tf, printf);
#if defined(DDB)
			kdb_trap(cause, tf);
			PC_BREAK_ADVANCE(tf);
			return;	/* KERN */
#endif
			panic("%s: unknown kernel trap", __func__);
		}
	}

	if (usertrap_p) {
		if (!ok)
			cpu_trapsignal(tf, &ksi);

		userret(curlwp);
	} else if (!ok) {
		dump_trapframe(tf, printf);
		panic("%s: fatal kernel trap", __func__);
	}
	/*
	 * Ensure interrupts are disabled in sstatus, and that interrupts
	 * will get enabled on 'sret' for userland.
	 */
	KASSERT(__SHIFTOUT(tf->tf_sr, SR_SIE) == 0);
	KASSERT(__SHIFTOUT(tf->tf_sr, SR_SPIE) != 0 ||
	    __SHIFTOUT(tf->tf_sr, SR_SPP) != 0);
}

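/*
 * cpu_ast --
 *	Handle an asynchronous software trap on the way back to user mode:
 *	account the trap, post any owed profiling tick (LP_OWEUPC) and call
 *	userret() before the lwp returns to user space.
 */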
void
cpu_ast(struct trapframe *tf)
{
	struct lwp * const l = curlwp;

	/*
	 * Allow a chance of a context switch just prior to the user
	 * exception return.
	 */
#ifdef __HAVE_PREEMPTION
	kpreempt_disable();
#endif
	struct cpu_info * const ci = curcpu();

	ci->ci_data.cpu_ntrap++;

	KDASSERT(ci->ci_cpl == IPL_NONE);
#ifdef __HAVE_PREEMPTION
	kpreempt_enable();
#endif

	if (curlwp->l_pflag & LP_OWEUPC) {
		curlwp->l_pflag &= ~LP_OWEUPC;
		ADDUPROF(curlwp);
	}

	userret(l);
}


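/*
 * fetch_user_data --
 *	Common helper for the _ufetch_*() primitives: read a 1, 2, 4 or
 *	8 byte value from the user address uaddr into *valp with SR_SUM set
 *	and an onfault handler armed.  Returns EFAULT for addresses outside
 *	the user address space and a non-zero error if the access faults.
 */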
static int
fetch_user_data(const void *uaddr, void *valp, size_t size)
{
	struct faultbuf fb;
	int error;

	const vaddr_t uva = (vaddr_t)uaddr;
	if (__predict_false(uva > VM_MAXUSER_ADDRESS - size))
		return EFAULT;

	if ((error = cpu_set_onfault(&fb, 1)) != 0)
		return error;

	csr_sstatus_set(SR_SUM);
	switch (size) {
	case 1:
		*(uint8_t *)valp = *(volatile const uint8_t *)uaddr;
		break;
	case 2:
		*(uint16_t *)valp = *(volatile const uint16_t *)uaddr;
		break;
	case 4:
		*(uint32_t *)valp = *(volatile const uint32_t *)uaddr;
		break;
#ifdef _LP64
	case 8:
		*(uint64_t *)valp = *(volatile const uint64_t *)uaddr;
		break;
#endif /* _LP64 */
	default:
		error = EINVAL;
	}
	csr_sstatus_clear(SR_SUM);

	cpu_unset_onfault();

	return error;
}

int
_ufetch_8(const uint8_t *uaddr, uint8_t *valp)
{
	return fetch_user_data(uaddr, valp, sizeof(*valp));
}

int
_ufetch_16(const uint16_t *uaddr, uint16_t *valp)
{
	return fetch_user_data(uaddr, valp, sizeof(*valp));
}

int
_ufetch_32(const uint32_t *uaddr, uint32_t *valp)
{
	return fetch_user_data(uaddr, valp, sizeof(*valp));
}

#ifdef _LP64
int
_ufetch_64(const uint64_t *uaddr, uint64_t *valp)
{
	return fetch_user_data(uaddr, valp, sizeof(*valp));
}
#endif /* _LP64 */

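/*
 * store_user_data --
 *	Common helper for the _ustore_*() primitives: write a 1, 2, 4 or
 *	8 byte value from *valp to the user address uaddr with SR_SUM set
 *	and an onfault handler armed.
 */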
static int
store_user_data(void *uaddr, const void *valp, size_t size)
{
	struct faultbuf fb;
	int error;

	const vaddr_t uva = (vaddr_t)uaddr;
	if (__predict_false(uva > VM_MAXUSER_ADDRESS - size))
		return EFAULT;

	if ((error = cpu_set_onfault(&fb, 1)) != 0)
		return error;

	csr_sstatus_set(SR_SUM);
	switch (size) {
	case 1:
		*(volatile uint8_t *)uaddr = *(const uint8_t *)valp;
		break;
	case 2:
		*(volatile uint16_t *)uaddr = *(const uint16_t *)valp;
		break;
	case 4:
		*(volatile uint32_t *)uaddr = *(const uint32_t *)valp;
		break;
#ifdef _LP64
	case 8:
		*(volatile uint64_t *)uaddr = *(const uint64_t *)valp;
		break;
#endif /* _LP64 */
	default:
		error = EINVAL;
	}
	csr_sstatus_clear(SR_SUM);

	cpu_unset_onfault();

	return error;
}

int
_ustore_8(uint8_t *uaddr, uint8_t val)
{
	return store_user_data(uaddr, &val, sizeof(val));
}

int
_ustore_16(uint16_t *uaddr, uint16_t val)
{
	return store_user_data(uaddr, &val, sizeof(val));
}

int
_ustore_32(uint32_t *uaddr, uint32_t val)
{
	return store_user_data(uaddr, &val, sizeof(val));
}

#ifdef _LP64
int
_ustore_64(uint64_t *uaddr, uint64_t val)
{
	return store_user_data(uaddr, &val, sizeof(val));
}
#endif /* _LP64 */