/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#define __PMAP_PRIVATE

__RCSID("$NetBSD: trap.c,v 1.1 2015/03/28 16:13:56 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>

#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/siginfo.h>

#include <uvm/uvm.h>

#include <riscv/locore.h>

#define	INSTRUCTION_TRAP_MASK	(__BIT(CAUSE_PRIVILEGED_INSTRUCTION) \
				|__BIT(CAUSE_ILLEGAL_INSTRUCTION))

#define	FAULT_TRAP_MASK		(__BIT(CAUSE_FAULT_FETCH) \
				|__BIT(CAUSE_FAULT_LOAD) \
				|__BIT(CAUSE_FAULT_STORE))

#define	MISALIGNED_TRAP_MASK	(__BIT(CAUSE_MISALIGNED_FETCH) \
				|__BIT(CAUSE_MISALIGNED_LOAD) \
				|__BIT(CAUSE_MISALIGNED_STORE))

static const char * const causenames[] = {
	[CAUSE_MISALIGNED_FETCH] = "misaligned fetch",
	[CAUSE_MISALIGNED_LOAD] = "misaligned load",
	[CAUSE_MISALIGNED_STORE] = "misaligned store",
	[CAUSE_FAULT_FETCH] = "fetch",
	[CAUSE_FAULT_LOAD] = "load",
	[CAUSE_FAULT_STORE] = "store",
	[CAUSE_FP_DISABLED] = "fp disabled",
	[CAUSE_ILLEGAL_INSTRUCTION] = "illegal instruction",
	[CAUSE_PRIVILEGED_INSTRUCTION] = "privileged instruction",
	[CAUSE_BREAKPOINT] = "breakpoint",
};

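/*
 * Redirect a trapframe to the fault handler recorded by cpu_set_onfault():
 * restore the return address and callee-saved registers from the faultbuf,
 * and hand back the value saved in FB_A0 (the onfault error code) in a0.
 */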
void
cpu_jump_onfault(struct trapframe *tf, const struct faultbuf *fb)
{
	tf->tf_a0 = fb->fb_reg[FB_A0];
	tf->tf_ra = fb->fb_reg[FB_RA];
	tf->tf_s0 = fb->fb_reg[FB_S0];
	tf->tf_s1 = fb->fb_reg[FB_S1];
	tf->tf_s2 = fb->fb_reg[FB_S2];
	tf->tf_s3 = fb->fb_reg[FB_S3];
	tf->tf_s4 = fb->fb_reg[FB_S4];
	tf->tf_s5 = fb->fb_reg[FB_S5];
	tf->tf_s6 = fb->fb_reg[FB_S6];
	tf->tf_s7 = fb->fb_reg[FB_S7];
	tf->tf_s8 = fb->fb_reg[FB_S8];
	tf->tf_s9 = fb->fb_reg[FB_S9];
	tf->tf_s10 = fb->fb_reg[FB_S10];
	tf->tf_s11 = fb->fb_reg[FB_S11];
}

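/*
 * Copy primitives.  Each one arms an onfault handler so that a bad
 * address aborts the copy and returns EFAULT instead of panicking.
 */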
int
copyin(const void *uaddr, void *kaddr, size_t len)
{
	struct faultbuf fb;
	int error;

	if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) {
		memcpy(kaddr, uaddr, len);
		cpu_unset_onfault();
	}
	return error;
}

int
copyout(const void *kaddr, void *uaddr, size_t len)
{
	struct faultbuf fb;
	int error;

	if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) {
		memcpy(uaddr, kaddr, len);
		cpu_unset_onfault();
	}
	return error;
}

int
kcopy(const void *kfaddr, void *kdaddr, size_t len)
{
	struct faultbuf fb;
	int error;

	if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) {
		memcpy(kdaddr, kfaddr, len);
		cpu_unset_onfault();
	}
	return error;
}

int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{
	struct faultbuf fb;
	int error;

	if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) {
		len = strlcpy(kdaddr, kfaddr, len);
		cpu_unset_onfault();
		if (done != NULL) {
			*done = len;
		}
	}
	return error;
}

int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{
	struct faultbuf fb;
	int error;

	if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) {
		len = strlcpy(kaddr, uaddr, len);
		cpu_unset_onfault();
		if (done != NULL) {
			*done = len;
		}
	}
	return error;
}

int
copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
{
	struct faultbuf fb;
	int error;

	if ((error = cpu_set_onfault(&fb, EFAULT)) == 0) {
		len = strlcpy(uaddr, kaddr, len);
		cpu_unset_onfault();
		if (done != NULL) {
			*done = len;
		}
	}
	return error;
}

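/*
 * Print the full register state of a trapframe through the supplied
 * printf-style callback, prefixed with a decoded trap cause.
 */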
static void
dump_trapframe(const struct trapframe *tf, void (*pr)(const char *, ...))
{
	const char *causestr = "?";
	if (tf->tf_cause < __arraycount(causenames)
	    && causenames[tf->tf_cause] != NULL)
		causestr = causenames[tf->tf_cause];
	(*pr)("Trapframe @ %p "
	    "(cause=%d (%s), status=%#x, pc=%#16"PRIxREGISTER
	    ", va=%#"PRIxREGISTER"):\n",
	    tf, tf->tf_cause, causestr, tf->tf_sr, tf->tf_pc, tf->tf_badaddr);
	(*pr)("ra=%#16"PRIxREGISTER", sp=%#16"PRIxREGISTER
	    ", gp=%#16"PRIxREGISTER", tp=%#16"PRIxREGISTER"\n",
	    tf->tf_ra, tf->tf_sp, tf->tf_gp, tf->tf_tp);
	(*pr)("s0=%#16"PRIxREGISTER", s1=%#16"PRIxREGISTER
	    ", s2=%#16"PRIxREGISTER", s3=%#16"PRIxREGISTER"\n",
	    tf->tf_s0, tf->tf_s1, tf->tf_s2, tf->tf_s3);
	(*pr)("s4=%#16"PRIxREGISTER", s5=%#16"PRIxREGISTER
	    ", s6=%#16"PRIxREGISTER", s7=%#16"PRIxREGISTER"\n",
	    tf->tf_s4, tf->tf_s5, tf->tf_s6, tf->tf_s7);
	(*pr)("s8=%#16"PRIxREGISTER", s9=%#16"PRIxREGISTER
	    ", s10=%#16"PRIxREGISTER", s11=%#16"PRIxREGISTER"\n",
	    tf->tf_s8, tf->tf_s9, tf->tf_s10, tf->tf_s11);
	(*pr)("a0=%#16"PRIxREGISTER", a1=%#16"PRIxREGISTER
	    ", a2=%#16"PRIxREGISTER", a3=%#16"PRIxREGISTER"\n",
	    tf->tf_a0, tf->tf_a1, tf->tf_a2, tf->tf_a3);
	(*pr)("a4=%#16"PRIxREGISTER", a5=%#16"PRIxREGISTER
	    ", a6=%#16"PRIxREGISTER", a7=%#16"PRIxREGISTER"\n",
	    tf->tf_a4, tf->tf_a5, tf->tf_a6, tf->tf_a7);
	(*pr)("t0=%#16"PRIxREGISTER", t1=%#16"PRIxREGISTER
	    ", t2=%#16"PRIxREGISTER", t3=%#16"PRIxREGISTER"\n",
	    tf->tf_t0, tf->tf_t1, tf->tf_t2, tf->tf_t3);
	(*pr)("t4=%#16"PRIxREGISTER", t5=%#16"PRIxREGISTER
	    ", t6=%#16"PRIxREGISTER"\n",
	    tf->tf_t4, tf->tf_t5, tf->tf_t6);
}

static inline void
trap_ksi_init(ksiginfo_t *ksi, int signo, int code, vaddr_t addr,
    register_t cause)
{
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = signo;
	ksi->ksi_code = code;
	ksi->ksi_addr = (void *)addr;
	ksi->ksi_trap = cause;
}

static void
cpu_trapsignal(struct trapframe *tf, ksiginfo_t *ksi)
{
	if (cpu_printfataltraps) {
		dump_trapframe(tf, printf);
	}
	(*curlwp->l_proc->p_emul->e_trapsignal)(curlwp, ksi);
}

static inline vm_prot_t
get_faulttype(register_t cause)
{
	if (cause == CAUSE_FAULT_LOAD)
		return VM_PROT_READ;
	if (cause == CAUSE_FAULT_STORE)
		return VM_PROT_READ | VM_PROT_WRITE;
	KASSERT(cause == CAUSE_FAULT_FETCH);
	return VM_PROT_READ | VM_PROT_EXECUTE;
}

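/*
 * Fast path for reference/modified/execpage emulation: if the PTE for the
 * faulting address merely needs its valid bit set or its no-write/no-execute
 * bit cleared, update it atomically, record the page attributes, and push
 * the new entry into the TLB without going through uvm_fault().
 */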
static bool
trap_pagefault_fixup(struct trapframe *tf, struct pmap *pmap, register_t cause,
    intptr_t addr)
{
	pt_entry_t * const ptep = pmap_pte_lookup(pmap, addr, NULL);
	struct vm_page *pg;

	if (ptep == NULL)
		return false;

	pt_entry_t opte = *ptep;
	pt_entry_t npte;
	u_int attr;
	do {
		if ((opte & ~PTE_G) == 0)
			return false;

		pg = PHYS_TO_VM_PAGE(pte_to_paddr(opte));
		if (pg == NULL)
			return false;

		attr = 0;
		npte = opte;
		if ((npte & PTE_V) == 0) {
			npte |= PTE_V;
			attr |= VM_PAGEMD_REFERENCED;
		}
		if (cause == CAUSE_FAULT_STORE) {
			if ((npte & PTE_NW) != 0) {
				npte &= ~PTE_NW;
				attr |= VM_PAGEMD_MODIFIED;
			}
		} else if (cause == CAUSE_FAULT_FETCH) {
			if ((npte & PTE_NX) != 0) {
				npte &= ~PTE_NX;
				attr |= VM_PAGEMD_EXECPAGE;
			}
		}

		if (attr == 0)
			return false;

	} while (opte != atomic_cas_pte(ptep, opte, npte));

	pmap_page_set_attributes(VM_PAGE_TO_MD(pg), attr);
	pmap_tlb_update_addr(pmap, addr, npte, 0);

	if (attr & VM_PAGEMD_EXECPAGE)
		pmap_md_page_syncicache(pg, curcpu()->ci_data.cpu_kcpuset);

	return true;
}

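/*
 * Handle a load/store/fetch fault: reject user accesses to kernel space,
 * try the reference/modified/execpage fast path, then fall back to
 * uvm_fault().  Kernel-mode faults additionally honour the current onfault
 * handler so a failed copy returns an error instead of panicking.
 */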
static bool
trap_pagefault(struct trapframe *tf, register_t epc, register_t status,
    register_t cause, register_t badaddr, bool usertrap_p, ksiginfo_t *ksi)
{
	struct proc * const p = curlwp->l_proc;
	const intptr_t addr = trunc_page(badaddr);

	if (__predict_false(usertrap_p
	    && (false
		// Make sure this address is not trying to access kernel space.
		|| addr < 0
#ifdef _LP64
		// If this is a process using a 32-bit address space, make
		// sure the address is a signed 32-bit number.
		|| ((p->p_flag & PK_32) && (int32_t) addr != addr)
#endif
		|| false))) {
		trap_ksi_init(ksi, SIGSEGV, SEGV_MAPERR, addr, cause);
		return false;
	}

	struct vm_map * const map = (addr >= 0 ? &p->p_vmspace->vm_map : kernel_map);

	// See if this fault is for reference/modified/execpage tracking
	if (trap_pagefault_fixup(tf, map->pmap, cause, addr))
		return true;

	const vm_prot_t ftype = get_faulttype(cause);

	if (usertrap_p) {
		int error = uvm_fault(&p->p_vmspace->vm_map, addr, ftype);
		if (error) {
			trap_ksi_init(ksi, SIGSEGV,
			    error == EACCES ? SEGV_ACCERR : SEGV_MAPERR,
			    (intptr_t)badaddr, cause);
			return false;
		}
		uvm_grow(p, addr);
		return true;
	}

	// Page faults are not allowed while dealing with interrupts
	if (cpu_intr_p())
		return false;

	struct faultbuf * const fb = cpu_disable_onfault();
	int error = uvm_fault(map, addr, ftype);
	cpu_enable_onfault(fb);
	if (error == 0) {
		if (map != kernel_map) {
			uvm_grow(p, addr);
		}
		return true;
	}

	if (fb == NULL) {
		return false;
	}

	cpu_jump_onfault(tf, fb);
	return true;
}

static bool
trap_instruction(struct trapframe *tf, register_t epc, register_t status,
    register_t cause, register_t badaddr, bool usertrap_p, ksiginfo_t *ksi)
{
	const bool prvopc_p = (cause == CAUSE_PRIVILEGED_INSTRUCTION);
	if (usertrap_p) {
		trap_ksi_init(ksi, SIGILL, prvopc_p ? ILL_PRVOPC : ILL_ILLOPC,
		    (intptr_t)badaddr, cause);
	}
	return false;
}

static bool
trap_misalignment(struct trapframe *tf, register_t epc, register_t status,
    register_t cause, register_t badaddr, bool usertrap_p, ksiginfo_t *ksi)
{
	if (usertrap_p) {
		trap_ksi_init(ksi, SIGBUS, BUS_ADRALN,
		    (intptr_t)badaddr, cause);
	}
	return false;
}

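/*
 * Main trap dispatcher: classify the cause, hand it to the appropriate
 * handler, and then either deliver a signal (failed user traps), return to
 * the interrupted code, or panic on an unrecoverable kernel trap.
 */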
void
cpu_trap(struct trapframe *tf, register_t epc, register_t status,
    register_t cause, register_t badaddr)
{
	const u_int fault_mask = 1U << cause;
	const intptr_t addr = badaddr;
	const bool usertrap_p = (status & SR_PS) == 0;
	bool ok = true;
	ksiginfo_t ksi;

	if (__predict_true(fault_mask & FAULT_TRAP_MASK)) {
#ifndef _LP64
		// This fault may be because the kernel's page table got a
		// new page table page and this pmap's page table doesn't
		// know about it.  See
		struct pmap * const pmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
		if ((intptr_t) addr < 0
		    && pmap != pmap_kernel()
		    && pmap_pdetab_fixup(pmap, addr)) {
			return;
		}
#endif
		ok = trap_pagefault(tf, epc, status, cause, addr,
		    usertrap_p, &ksi);
	} else if (fault_mask & INSTRUCTION_TRAP_MASK) {
		ok = trap_instruction(tf, epc, status, cause, addr,
		    usertrap_p, &ksi);
	} else if (fault_mask & __BIT(CAUSE_FP_DISABLED)) {
		if (!usertrap_p) {
			panic("%s: fp used @ %#"PRIxREGISTER" in kernel!",
			    __func__, tf->tf_pc);
		}
		fpu_load();
	} else if (fault_mask & MISALIGNED_TRAP_MASK) {
		ok = trap_misalignment(tf, epc, status, cause, addr,
		    usertrap_p, &ksi);
	} else {
		dump_trapframe(tf, printf);
		panic("%s: unknown kernel trap", __func__);
	}

	if (usertrap_p) {
		if (!ok)
			cpu_trapsignal(tf, &ksi);
		userret(curlwp);
	} else if (!ok) {
		dump_trapframe(tf, printf);
		panic("%s: fatal kernel trap", __func__);
	}
}

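/*
 * Asynchronous software trap: clear the pending-AST flag, charge any
 * deferred profiling ticks, and preempt if a reschedule was requested.
 */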
void
cpu_ast(struct trapframe *tf)
{
	struct cpu_info * const ci = curcpu();

	atomic_swap_uint(&curlwp->l_md.md_astpending, 0);

	if (curlwp->l_pflag & LP_OWEUPC) {
		curlwp->l_pflag &= ~LP_OWEUPC;
		ADDUPROF(curlwp);
	}

	if (ci->ci_want_resched) {
		preempt();
	}
}

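/*
 * fubyte()/fusword()/fuword() and subyte()/susword()/suword() fetch and
 * store single user values through a small bounce buffer, relying on the
 * onfault mechanism to turn a bad user address into a -1 return.  The
 * *intr() variants are not supported and always fail.
 */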
union xubuf {
	uint8_t b[4];
	uint16_t w[2];
	uint32_t l[1];
};

static bool
fetch_user_data(union xubuf *xu, const void *base, size_t len)
{
	struct faultbuf fb;
	if (cpu_set_onfault(&fb, 1) == 0) {
		memcpy(xu->b, base, len);
		cpu_unset_onfault();
		return true;
	}
	return false;
}

int
fubyte(const void *base)
{
	union xubuf xu;
	if (fetch_user_data(&xu, base, sizeof(xu.b[0])))
		return xu.b[0];
	return -1;
}

int
fusword(const void *base)
{
	union xubuf xu;
	if (fetch_user_data(&xu, base, sizeof(xu.w[0])))
		return xu.w[0];
	return -1;
}

int
fuswintr(const void *base)
{
	return -1;
}

long
fuword(const void *base)
{
	union xubuf xu;
	if (fetch_user_data(&xu, base, sizeof(xu.l[0])))
		return xu.l[0];
	return -1;
}

static bool
store_user_data(void *base, const union xubuf *xu, size_t len)
{
	struct faultbuf fb;
	if (cpu_set_onfault(&fb, 1) == 0) {
		memcpy(base, xu->b, len);
		cpu_unset_onfault();
		return true;
	}
	return false;
}

int
subyte(void *base, int c)
{
	union xubuf xu = { .b[0] = c, .b[1 ... 3] = 0 };
	return store_user_data(base, &xu, sizeof(xu.b[0])) ? 0 : -1;
}

int
susword(void *base, short c)
{
	union xubuf xu = { .w[0] = c, .w[1] = 0 };
	return store_user_data(base, &xu, sizeof(xu.w[0])) ? 0 : -1;
}

int
suswintr(void *base, short c)
{
	return -1;
}

int
suword(void *base, long c)
{
	union xubuf xu = { .l[0] = c };
	return store_user_data(base, &xu, sizeof(xu.l[0])) ? 0 : -1;
}

void
cpu_intr(struct trapframe *tf, register_t epc, register_t status,
    register_t cause)
{
	/* XXX */
}