xref: /freebsd/sys/arm64/arm64/trap.c (revision 0957b409)
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/pioctl.h>
37 #include <sys/proc.h>
38 #include <sys/ptrace.h>
39 #include <sys/syscall.h>
40 #include <sys/sysent.h>
41 #ifdef KDB
42 #include <sys/kdb.h>
43 #endif
44 
45 #include <vm/vm.h>
46 #include <vm/pmap.h>
47 #include <vm/vm_kern.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_param.h>
50 #include <vm/vm_extern.h>
51 
52 #include <machine/frame.h>
53 #include <machine/pcb.h>
54 #include <machine/pcpu.h>
55 #include <machine/undefined.h>
56 
57 #ifdef KDTRACE_HOOKS
58 #include <sys/dtrace_bsd.h>
59 #endif
60 
61 #ifdef VFP
62 #include <machine/vfp.h>
63 #endif
64 
65 #ifdef KDB
66 #include <machine/db_machdep.h>
67 #endif
68 
69 #ifdef DDB
70 #include <ddb/db_output.h>
71 #endif
72 
73 extern register_t fsu_intr_fault;
74 
75 /* Called from exception.S */
76 void do_el1h_sync(struct thread *, struct trapframe *);
77 void do_el0_sync(struct thread *, struct trapframe *);
78 void do_el0_error(struct trapframe *);
79 void do_serror(struct trapframe *);
80 void unhandled_exception(struct trapframe *);
81 
82 static void print_registers(struct trapframe *frame);
83 
84 int (*dtrace_invop_jump_addr)(struct trapframe *);
85 
86 static __inline void
87 call_trapsignal(struct thread *td, int sig, int code, void *addr)
88 {
89 	ksiginfo_t ksi;
90 
91 	ksiginfo_init_trap(&ksi);
92 	ksi.ksi_signo = sig;
93 	ksi.ksi_code = code;
94 	ksi.ksi_addr = addr;
95 	trapsignal(td, &ksi);
96 }
97 
98 int
99 cpu_fetch_syscall_args(struct thread *td)
100 {
101 	struct proc *p;
102 	register_t *ap;
103 	struct syscall_args *sa;
104 	int nap;
105 
106 	nap = 8;
107 	p = td->td_proc;
108 	ap = td->td_frame->tf_x;
109 	sa = &td->td_sa;
110 
111 	sa->code = td->td_frame->tf_x[8];
112 
113 	if (sa->code == SYS_syscall || sa->code == SYS___syscall) {
114 		sa->code = *ap++;
115 		nap--;
116 	}
117 
118 	if (sa->code >= p->p_sysent->sv_size)
119 		sa->callp = &p->p_sysent->sv_table[0];
120 	else
121 		sa->callp = &p->p_sysent->sv_table[sa->code];
122 
123 	sa->narg = sa->callp->sy_narg;
124 	memcpy(sa->args, ap, nap * sizeof(register_t));
125 	if (sa->narg > nap)
126 		panic("ARM64TODO: Could we have more than 8 args?");
127 
128 	td->td_retval[0] = 0;
129 	td->td_retval[1] = 0;
130 
131 	return (0);
132 }
133 
134 #include "../../kern/subr_syscall.c"
135 
136 static void
137 svc_handler(struct thread *td, struct trapframe *frame)
138 {
139 	int error;
140 
141 	if ((frame->tf_esr & ESR_ELx_ISS_MASK) == 0) {
142 		error = syscallenter(td);
143 		syscallret(td, error);
144 	} else {
145 		call_trapsignal(td, SIGILL, ILL_ILLOPN, (void *)frame->tf_elr);
146 		userret(td, frame);
147 	}
148 }
149 
/*
 * Handle an instruction or data abort.  Try to resolve the fault via
 * pmap_fault()/vm_fault(); when that fails, post a signal for userspace
 * faults, use the pcb_onfault recovery address for copyin/copyout-style
 * kernel accesses, or panic.
 *
 * 'lower' is non-zero when the abort was taken from EL0 (userspace);
 * 'exec' is non-zero for instruction fetches.  'esr' and 'far' are the
 * syndrome and fault-address values captured at exception entry.
 */
static void
data_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
    uint64_t far, int lower, int exec)
{
	struct vm_map *map;
	struct proc *p;
	struct pcb *pcb;
	vm_prot_t ftype;
	vm_offset_t va;
	int error, sig, ucode;
#ifdef KDB
	bool handled;
#endif

	/*
	 * According to the ARMv8-A rev. A.g, B2.10.5 "Load-Exclusive
	 * and Store-Exclusive instruction usage restrictions", state
	 * of the exclusive monitors after data abort exception is unknown.
	 */
	clrex();

#ifdef KDB
	/* A fault while the debugger is active re-enters the debugger. */
	if (kdb_active) {
		kdb_reenter();
		return;
	}
#endif

	/* Select the map to fault against: user map for EL0 faults. */
	pcb = td->td_pcb;
	p = td->td_proc;
	if (lower)
		map = &p->p_vmspace->vm_map;
	else {
		/* The top bit tells us which range to use */
		if (far >= VM_MAXUSER_ADDRESS) {
			map = kernel_map;
		} else {
			map = &p->p_vmspace->vm_map;
			if (map == NULL)
				map = kernel_map;
		}
	}

	/*
	 * The call to pmap_fault can be dangerous when coming from the
	 * kernel as it may not be able to lock the pmap to check if
	 * the address is now valid. Because of this we filter the cases
	 * when we are not going to see superpage activity.
	 */
	if (!lower) {
		/*
		 * We may fault in a DMAP region due to a superpage being
		 * unmapped when the access took place.
		 */
		if (map == kernel_map && !VIRT_IN_DMAP(far))
			goto no_pmap_fault;
		/*
		 * We can also fault in the userspace handling functions,
		 * e.g. copyin. In these cases we will have set a fault
		 * handler so we can check if this is set before calling
		 * pmap_fault.
		 */
		if (map != kernel_map && pcb->pcb_onfault == 0)
			goto no_pmap_fault;
	}

	/* pmap_fault() may resolve spurious/superpage faults cheaply. */
	if (pmap_fault(map->pmap, esr, far) == KERN_SUCCESS)
		return;

no_pmap_fault:
	/* Faulting with a spinlock held or in a critical section is fatal. */
	KASSERT(td->td_md.md_spinlock_count == 0,
	    ("data abort with spinlock held"));
	if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK |
	    WARN_GIANTOK, NULL, "Kernel page fault") != 0) {
		print_registers(frame);
		printf(" far: %16lx\n", far);
		printf(" esr:         %.8lx\n", esr);
		panic("data abort in critical section or under mutex");
	}

	va = trunc_page(far);
	/* Bit 6 of the ESR ISS distinguishes write faults from reads. */
	ftype = ((esr >> 6) & 1) ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
	if (exec)
		ftype |= VM_PROT_EXECUTE;

	/* Fault in the page. */
	error = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	if (error != KERN_SUCCESS) {
		if (lower) {
			/* Unresolvable user fault: deliver SIGSEGV. */
			sig = SIGSEGV;
			if (error == KERN_PROTECTION_FAILURE)
				ucode = SEGV_ACCERR;
			else
				ucode = SEGV_MAPERR;
			call_trapsignal(td, sig, ucode, (void *)far);
		} else {
			/*
			 * Kernel fault: if a recovery handler was set
			 * (e.g. by copyin), resume there with the error
			 * code in x0 instead of panicking.
			 */
			if (td->td_intr_nesting_level == 0 &&
			    pcb->pcb_onfault != 0) {
				frame->tf_x[0] = error;
				frame->tf_elr = pcb->pcb_onfault;
				return;
			}

			printf("Fatal data abort:\n");
			print_registers(frame);
			printf(" far: %16lx\n", far);
			printf(" esr:         %.8lx\n", esr);

#ifdef KDB
			/* Optionally drop into the debugger before panic. */
			if (debugger_on_trap) {
				kdb_why = KDB_WHY_TRAP;
				handled = kdb_trap(ESR_ELx_EXCEPTION(esr), 0,
				    frame);
				kdb_why = KDB_WHY_UNSET;
				if (handled)
					return;
			}
#endif
			panic("vm_fault failed: %lx", frame->tf_elr);
		}
	}

	if (lower)
		userret(td, frame);
}
275 
276 static void
277 print_registers(struct trapframe *frame)
278 {
279 	u_int reg;
280 
281 	for (reg = 0; reg < nitems(frame->tf_x); reg++) {
282 		printf(" %sx%d: %16lx\n", (reg < 10) ? " " : "", reg,
283 		    frame->tf_x[reg]);
284 	}
285 	printf("  sp: %16lx\n", frame->tf_sp);
286 	printf("  lr: %16lx\n", frame->tf_lr);
287 	printf(" elr: %16lx\n", frame->tf_elr);
288 	printf("spsr:         %8x\n", frame->tf_spsr);
289 }
290 
/*
 * Synchronous exception taken from EL1, i.e. from within the kernel.
 * Called from exception.S.  Dispatches on the exception class encoded
 * in ESR_EL1.
 */
void
do_el1h_sync(struct thread *td, struct trapframe *frame)
{
	struct trapframe *oframe;
	uint32_t exception;
	uint64_t esr, far;

	/* Read the esr register to get the exception details */
	esr = frame->tf_esr;
	exception = ESR_ELx_EXCEPTION(esr);

#ifdef KDTRACE_HOOKS
	/* Give DTrace first refusal on the trap. */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, exception))
		return;
#endif

	CTR4(KTR_TRAP,
	    "do_el1_sync: curthread: %p, esr %lx, elr: %lx, frame: %p", td,
	    esr, frame->tf_elr, frame);

	/*
	 * Point td_frame at this frame for the duration of the handler,
	 * restoring the previous value on exit.  Debugger exceptions
	 * (breakpoint/watchpoint/single-step) keep the pre-trap frame.
	 */
	oframe = td->td_frame;

	switch (exception) {
	case EXCP_BRK:
	case EXCP_WATCHPT_EL1:
	case EXCP_SOFTSTP_EL1:
		break;
	default:
		td->td_frame = frame;
		break;
	}

	switch(exception) {
	case EXCP_FP_SIMD:
	case EXCP_TRAP_FP:
#ifdef VFP
		/* FP traps at EL1 are only expected for in-kernel FP use. */
		if ((td->td_pcb->pcb_fpflags & PCB_FP_KERN) != 0) {
			vfp_restore_state();
		} else
#endif
		{
			print_registers(frame);
			printf(" esr:         %.8lx\n", esr);
			panic("VFP exception in the kernel");
		}
		break;
	case EXCP_INSN_ABORT:
	case EXCP_DATA_ABORT:
		/* Capture FAR_EL1 before re-enabling interrupts. */
		far = READ_SPECIALREG(far_el1);
		intr_enable();
		data_abort(td, frame, esr, far, 0,
		    exception == EXCP_INSN_ABORT);
		break;
	case EXCP_BRK:
#ifdef KDTRACE_HOOKS
		/* ISS 0x40d is the brk immediate used by DTrace. */
		if ((esr & ESR_ELx_ISS_MASK) == 0x40d && \
		    dtrace_invop_jump_addr != 0) {
			dtrace_invop_jump_addr(frame);
			break;
		}
#endif
#ifdef KDB
		kdb_trap(exception, 0,
		    (td->td_frame != NULL) ? td->td_frame : frame);
#else
		panic("No debugger in kernel.\n");
#endif
		/* Step over the brk instruction (A64 insns are 4 bytes). */
		frame->tf_elr += 4;
		break;
	case EXCP_WATCHPT_EL1:
	case EXCP_SOFTSTP_EL1:
#ifdef KDB
		kdb_trap(exception, 0,
		    (td->td_frame != NULL) ? td->td_frame : frame);
#else
		panic("No debugger in kernel.\n");
#endif
		break;
	case EXCP_UNKNOWN:
		/* Let the undefined-instruction emulation handlers try. */
		if (undef_insn(1, frame))
			break;
		/* FALLTHROUGH */
	default:
		print_registers(frame);
		panic("Unknown kernel exception %x esr_el1 %lx\n", exception,
		    esr);
	}

	td->td_frame = oframe;
}
381 
/*
 * Synchronous exception taken from EL0 (userspace).  Dispatches on the
 * exception class in ESR_EL1, either servicing the trap (syscall, page
 * fault, FP enable) or posting a signal to the thread.
 */
void
do_el0_sync(struct thread *td, struct trapframe *frame)
{
	pcpu_bp_harden bp_harden;
	uint32_t exception;
	uint64_t esr, far;

	/* Check we have a sane environment when entering from userland */
	KASSERT((uintptr_t)get_pcpu() >= VM_MIN_KERNEL_ADDRESS,
	    ("Invalid pcpu address from userland: %p (tpidr %lx)",
	     get_pcpu(), READ_SPECIALREG(tpidr_el1)));

	esr = frame->tf_esr;
	exception = ESR_ELx_EXCEPTION(esr);
	/* Capture FAR_EL1 before enabling interrupts, where it is needed. */
	switch (exception) {
	case EXCP_INSN_ABORT_L:
		far = READ_SPECIALREG(far_el1);

		/*
		 * Userspace may be trying to train the branch predictor to
		 * attack the kernel. If we are on a CPU affected by this
		 * call the handler to clear the branch predictor state.
		 */
		if (far > VM_MAXUSER_ADDRESS) {
			bp_harden = PCPU_GET(bp_harden);
			if (bp_harden != NULL)
				bp_harden();
		}
		break;
	case EXCP_UNKNOWN:
	case EXCP_DATA_ABORT_L:
	case EXCP_DATA_ABORT:
		far = READ_SPECIALREG(far_el1);
		break;
	}
	intr_enable();

	CTR4(KTR_TRAP,
	    "do_el0_sync: curthread: %p, esr %lx, elr: %lx, frame: %p", td, esr,
	    frame->tf_elr, frame);

	switch(exception) {
	case EXCP_FP_SIMD:
	case EXCP_TRAP_FP:
#ifdef VFP
		/* Lazy FP restore: load this thread's VFP state. */
		vfp_restore_state();
#else
		panic("VFP exception in userland");
#endif
		break;
	case EXCP_SVC32:
	case EXCP_SVC64:
		/* System call (from 32- or 64-bit userspace). */
		svc_handler(td, frame);
		break;
	case EXCP_INSN_ABORT_L:
	case EXCP_DATA_ABORT_L:
	case EXCP_DATA_ABORT:
		data_abort(td, frame, esr, far, 1,
		    exception == EXCP_INSN_ABORT_L);
		break;
	case EXCP_UNKNOWN:
		/* Try emulation first, otherwise deliver SIGILL. */
		if (!undef_insn(0, frame))
			call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)far);
		userret(td, frame);
		break;
	case EXCP_SP_ALIGN:
		call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_sp);
		userret(td, frame);
		break;
	case EXCP_PC_ALIGN:
		call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr);
		userret(td, frame);
		break;
	case EXCP_BRK:
		call_trapsignal(td, SIGTRAP, TRAP_BRKPT, (void *)frame->tf_elr);
		userret(td, frame);
		break;
	case EXCP_MSR:
		/* Trapped system-register access from EL0. */
		call_trapsignal(td, SIGILL, ILL_PRVOPC, (void *)frame->tf_elr);
		userret(td, frame);
		break;
	case EXCP_SOFTSTP_EL0:
		/* Single-step completed: disable stepping, notify debugger. */
		td->td_frame->tf_spsr &= ~PSR_SS;
		td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
		WRITE_SPECIALREG(MDSCR_EL1,
		    READ_SPECIALREG(MDSCR_EL1) & ~DBG_MDSCR_SS);
		call_trapsignal(td, SIGTRAP, TRAP_TRACE,
		    (void *)frame->tf_elr);
		userret(td, frame);
		break;
	default:
		call_trapsignal(td, SIGBUS, BUS_OBJERR, (void *)frame->tf_elr);
		userret(td, frame);
		break;
	}

	/* No kernel FP state may be live when returning to userspace. */
	KASSERT((td->td_pcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
	    ("Kernel VFP flags set while entering userspace"));
	KASSERT(
	    td->td_pcb->pcb_fpusaved == &td->td_pcb->pcb_fpustate,
	    ("Kernel VFP state in use when entering userspace"));
}
484 
485 /*
486  * TODO: We will need to handle these later when we support ARMv8.2 RAS.
487  */
488 void
489 do_serror(struct trapframe *frame)
490 {
491 	uint64_t esr, far;
492 
493 	far = READ_SPECIALREG(far_el1);
494 	esr = frame->tf_esr;
495 
496 	print_registers(frame);
497 	printf(" far: %16lx\n", far);
498 	printf(" esr:         %.8lx\n", esr);
499 	panic("Unhandled System Error");
500 }
501 
502 void
503 unhandled_exception(struct trapframe *frame)
504 {
505 	uint64_t esr, far;
506 
507 	far = READ_SPECIALREG(far_el1);
508 	esr = frame->tf_esr;
509 
510 	print_registers(frame);
511 	printf(" far: %16lx\n", far);
512 	printf(" esr:         %.8lx\n", esr);
513 	panic("Unhandled exception");
514 }
515