/*-
 * Copyright 2014 Olivier Houchard <cognet@FreeBSD.org>
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * Copyright 2014 Andrew Turner <andrew@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ktrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/signalvar.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/machdep.h>
#include <machine/pcb.h>

#ifdef KDB
#include <sys/kdb.h>
#include <machine/db_machdep.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
#endif

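/*
 * Fixup label set as pcb_onfault around cache maintenance operations;
 * a translation fault during such an operation bails out through it,
 * returning the failing address in r0 (see the FAULT_ICACHE handling
 * in abort_handler() below).
 */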
extern char cachebailout[];

#ifdef DEBUG
int last_fault_code;	/* For the benefit of pmap_fault_fixup() */
#endif

struct ksig {
	int sig;
	u_long code;
	vm_offset_t	addr;
};

typedef int abort_func_t(struct trapframe *, u_int, u_int, u_int, u_int,
    struct thread *, struct ksig *);

static abort_func_t abort_fatal;
static abort_func_t abort_align;
static abort_func_t abort_icache;

struct abort {
	abort_func_t	*func;
	const char	*desc;
};

/*
 * How are the aborts handled?
 *
 * Undefined Code:
 *  - Always fatal as we do not know what it means.
 * Imprecise External Abort:
 *  - Always fatal, but could be handled somehow in the future.
 *    Buggy PCIe hardware can generate these; a bypass for that case is
 *    sketched (but disabled) in abort_imprecise().
 * Precise External Abort:
 *  - Always fatal, but who knows in the future???
 * Debug Event:
 *  - Special handling.
 * External Translation Abort (L1 & L2)
 *  - Always fatal as something is screwed up in page tables or hardware.
 * Domain Fault (L1 & L2):
 *  - Always fatal as we do not play games with domains.
 * Alignment Fault:
 *  - Everything should be aligned in the kernel with the exception of user
 *    to kernel and vice versa data copying, so if pcb_onfault is not set,
 *    it's fatal. We generate a signal in case of an abort from user mode.
 * Instruction cache maintenance:
 *  - According to the manual, this is a translation fault during a cache
 *    maintenance operation. So, it could be really complex in the SMP case
 *    and fuzzy too for cache operations working on virtual addresses. For
 *    now, we will consider this abort as fatal. In fact, no cache
 *    maintenance on unmapped virtual addresses should be called. As cache
 *    maintenance operations (except DMB, DSB, and Flush Prefetch Buffer)
 *    are privileged, the abort is fatal for user mode as well for now.
 *    (This is a good place to note that cache maintenance on a virtual
 *    address fills the TLB.)
 * Access Bit (L1 & L2):
 *  - Fast hardware emulation for kernel and user mode.
 * Translation Fault (L1 & L2):
 *  - The standard fault mechanism is used, including vm_fault().
 * Permission Fault (L1 & L2):
 *  - Fast hardware emulation of modify bits; in other cases, the standard
 *    fault mechanism is used, including vm_fault().
 */

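/*
 * The table is indexed by FSR_TO_FAULT(fsr), i.e. by the fault status
 * encoding FS[4:0] gathered from FSR bits [10] and [3:0]. Entries with
 * a NULL handler are resolved inline in abort_handler().
 */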
static const struct abort aborts[] = {
	{abort_fatal,	"Undefined Code (0x000)"},
	{abort_align,	"Alignment Fault"},
	{abort_fatal,	"Debug Event"},
	{NULL,		"Access Bit (L1)"},
	{NULL,		"Instruction cache maintenance"},
	{NULL,		"Translation Fault (L1)"},
	{NULL,		"Access Bit (L2)"},
	{NULL,		"Translation Fault (L2)"},

	{abort_fatal,	"External Abort"},
	{abort_fatal,	"Domain Fault (L1)"},
	{abort_fatal,	"Undefined Code (0x00A)"},
	{abort_fatal,	"Domain Fault (L2)"},
	{abort_fatal,	"External Translation Abort (L1)"},
	{NULL,		"Permission Fault (L1)"},
	{abort_fatal,	"External Translation Abort (L2)"},
	{NULL,		"Permission Fault (L2)"},

	{abort_fatal,	"TLB Conflict Abort"},
	{abort_fatal,	"Undefined Code (0x401)"},
	{abort_fatal,	"Undefined Code (0x402)"},
	{abort_fatal,	"Undefined Code (0x403)"},
	{abort_fatal,	"Undefined Code (0x404)"},
	{abort_fatal,	"Undefined Code (0x405)"},
	{abort_fatal,	"Asynchronous External Abort"},
	{abort_fatal,	"Undefined Code (0x407)"},

	{abort_fatal,	"Asynchronous Parity Error on Memory Access"},
	{abort_fatal,	"Parity Error on Memory Access"},
	{abort_fatal,	"Undefined Code (0x40A)"},
	{abort_fatal,	"Undefined Code (0x40B)"},
	{abort_fatal,	"Parity Error on Translation (L1)"},
	{abort_fatal,	"Undefined Code (0x40D)"},
	{abort_fatal,	"Parity Error on Translation (L2)"},
	{abort_fatal,	"Undefined Code (0x40F)"}
};

static __inline void
call_trapsignal(struct thread *td, int sig, int code, vm_offset_t addr)
{
	ksiginfo_t ksi;

	CTR4(KTR_TRAP, "%s: addr: %#x, sig: %d, code: %d",
	   __func__, addr, sig, code);

	/*
	 * TODO: it would be nice to know whether we are serving a data
	 * or a prefetch abort.
	 */

	ksiginfo_init_trap(&ksi);
	ksi.ksi_signo = sig;
	ksi.ksi_code = code;
	ksi.ksi_addr = (void *)addr;
	trapsignal(td, &ksi);
}

/*
 * abort_imprecise() handles the following abort:
 *
 *  FAULT_EA_IMPREC - Imprecise External Abort
 *
 * "Imprecise" means that we don't know where the abort happened, and
 * thus FAR is undefined. The abort should never fire, but hot plugging
 * or accidental hardware failure can cause it. If the abort happens,
 * it can even be raised in a different (thread) context. Without any
 * additional support, the abort is fatal, as we do not know what really
 * happened.
 *
 * QQQ: Some additional functionality, like pcb_onfault but global,
 *      could be implemented. Imprecise handlers could be registered
 *      which tell us if the abort is caused by something they know
 *      about. They should return one of three codes like:
 *		FAULT_IS_MINE,
 *		FAULT_CAN_BE_MINE,
 *		FAULT_IS_NOT_MINE.
 *      The handlers should be called until one of them returns
 *      FAULT_IS_MINE or all have been called. If all handlers return
 *	FAULT_IS_NOT_MINE, then the abort is fatal.
 */
static __inline void
abort_imprecise(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode)
{

	/*
	 * XXX - We can get an imprecise abort as a result of an access
	 * to not-present PCI/PCIe configuration space.
	 */
#if 0
	goto out;
#endif
	abort_fatal(tf, FAULT_EA_IMPREC, fsr, 0, prefetch, curthread, NULL);

	/*
	 * Returning from this function means that we ignore
	 * the abort for a good reason. Note that an imprecise abort
	 * could fire any time, even in user mode.
	 */

#if 0
out:
	if (usermode)
		userret(curthread, tf);
#endif
}

/*
 * abort_debug() handles the following abort:
 *
 *  FAULT_DEBUG - Debug Event
 */
static __inline void
abort_debug(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode,
    u_int far)
{

	if (usermode) {
		struct thread *td;

		td = curthread;
		call_trapsignal(td, SIGTRAP, TRAP_BRKPT, far);
		userret(td, tf);
	} else {
#ifdef KDB
		kdb_trap((prefetch) ? T_BREAKPOINT : T_WATCHPOINT, 0, tf);
#else
		printf("No debugger in kernel.\n");
#endif
	}
}

/*
 * Abort handler.
 *
 * FAR, FSR, and everything else that can be lost after enabling
 * interrupts must be grabbed before the interrupts are enabled. Note
 * that once interrupts are enabled, we could even migrate to another
 * CPU ...
 *
 * TODO: move quick cases to ASM
 */
void
abort_handler(struct trapframe *tf, int prefetch)
{
	struct thread *td;
	vm_offset_t far, va;
	int idx, rv;
	uint32_t fsr;
	struct ksig ksig;
	struct proc *p;
	struct pcb *pcb;
	struct vm_map *map;
	struct vmspace *vm;
	vm_prot_t ftype;
	bool usermode;
#ifdef INVARIANTS
	void *onfault;
#endif

	VM_CNT_INC(v_trap);
	td = curthread;

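	/*
	 * Read the fault status and fault address registers from CP15
	 * before interrupts can be enabled. ARM prior to v7 has no IFAR,
	 * so for prefetch aborts the faulting address is simply the PC.
	 */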
	fsr = (prefetch) ? cp15_ifsr_get() : cp15_dfsr_get();
#if __ARM_ARCH >= 7
	far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get();
#else
	far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get();
#endif

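	/* Fold FSR bits [10] and [3:0] into an index into aborts[]. */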
	idx = FSR_TO_FAULT(fsr);
	usermode = TRAPF_USERMODE(tf);	/* Abort came from user mode? */
	if (usermode)
		td->td_frame = tf;

	CTR6(KTR_TRAP, "%s: fsr %#x (idx %u) far %#x prefetch %u usermode %d",
	    __func__, fsr, idx, far, prefetch, usermode);

	/*
	 * First, handle aborts that are not directly related to mapping.
	 */
	if (__predict_false(idx == FAULT_EA_IMPREC)) {
		abort_imprecise(tf, fsr, prefetch, usermode);
		return;
	}

	if (__predict_false(idx == FAULT_DEBUG)) {
		abort_debug(tf, fsr, prefetch, usermode, far);
		return;
	}

	/*
	 * ARM has a set of unprivileged load and store instructions
	 * (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used in modes
	 * other than user mode, and the OS should recognize their aborts and
	 * behave appropriately. However, there is no reasonable way to do
	 * that in general unless we restrict the handling somehow.
	 *
	 * For now, these instructions are used only in copyin()/copyout()
	 * like functions, where user-mode buffers are checked in advance to
	 * ensure that they are not from KVA space. Thus, no action is needed
	 * here.
	 */

	/*
	 * (1) Handle access and R/W hardware emulation aborts.
	 * (2) Check that the abort is not on pmap-essential address ranges.
	 *     There is no way to fix it, so we don't even try.
	 */
	rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode);
	if (rv == KERN_SUCCESS)
		return;
#ifdef KDB
	if (kdb_active) {
		kdb_reenter();
		goto out;
	}
#endif
	if (rv == KERN_INVALID_ADDRESS)
		goto nogo;

	if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
		/*
		 * Due to both processor errata and lazy TLB invalidation when
		 * access restrictions are removed from virtual pages, memory
		 * accesses that are allowed by the physical mapping layer may
		 * nonetheless cause one spurious page fault per virtual page.
		 * When the thread is executing a "no faulting" section that
		 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
		 * every page fault is treated as a spurious page fault,
		 * unless it accesses the same virtual address as the most
		 * recent page fault within the same "no faulting" section.
		 */
		if (td->td_md.md_spurflt_addr != far ||
		    (td->td_pflags & TDP_RESETSPUR) != 0) {
			td->td_md.md_spurflt_addr = far;
			td->td_pflags &= ~TDP_RESETSPUR;

			tlb_flush_local(far & ~PAGE_MASK);
			return;
		}
	} else {
		/*
		 * If we get a page fault while in a critical section, then
		 * it is most likely a fatal kernel page fault.  The kernel
		 * is already going to panic trying to get a sleep lock to
		 * do the VM lookup, so just consider it a fatal trap so the
		 * kernel can print out a useful trap message and even get
		 * to the debugger.
		 *
		 * If we get a page fault while holding a non-sleepable
		 * lock, then it is most likely a fatal kernel page fault.
		 * If WITNESS is enabled, then it's going to whine about
		 * bogus LORs with various VM locks, so just skip to the
		 * fatal trap handling directly.
		 */
		if (td->td_critnest != 0 ||
		    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
		    "Kernel page fault") != 0) {
			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
			return;
		}
	}

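	/*
	 * A set PSR_I/PSR_F bit in the saved SPSR means that the
	 * corresponding interrupt was masked when the abort was taken.
	 */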
	/* Re-enable interrupts if they were enabled previously. */
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true((tf->tf_spsr & PSR_I) == 0))
			enable_interrupts(PSR_I);
		if (__predict_true((tf->tf_spsr & PSR_F) == 0))
			enable_interrupts(PSR_F);
	}

	p = td->td_proc;
	if (usermode) {
		td->td_pticks = 0;
		if (td->td_cowgen != p->p_cowgen)
			thread_cow_update(td);
	}

	/* Invoke the appropriate handler, if necessary. */
	if (__predict_false(aborts[idx].func != NULL)) {
		if ((aborts[idx].func)(tf, idx, fsr, far, prefetch, td, &ksig))
			goto do_trapsignal;
		goto out;
	}

	/*
	 * At this point, we're dealing with one of the following aborts:
	 *
	 *  FAULT_ICACHE   - I-cache maintenance
	 *  FAULT_TRAN_xx  - Translation
	 *  FAULT_PERM_xx  - Permission
	 */

	/*
	 * Don't pass a faulting cache operation to vm_fault(). We don't
	 * want to handle all the vm stuff at this moment.
	 */
	pcb = td->td_pcb;
	if (__predict_false(pcb->pcb_onfault == cachebailout)) {
		tf->tf_r0 = far;		/* return failing address */
		tf->tf_pc = (register_t)pcb->pcb_onfault;
		return;
	}

	/* Handle remaining I-cache aborts. */
	if (idx == FAULT_ICACHE) {
		if (abort_icache(tf, idx, fsr, far, prefetch, td, &ksig))
			goto do_trapsignal;
		goto out;
	}

	va = trunc_page(far);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory. If curproc
		 * is NULL or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = (p != NULL) ? p->p_vmspace : NULL;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
		if (!usermode && (td->td_intr_nesting_level != 0 ||
		    pcb->pcb_onfault == NULL)) {
			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
			return;
		}
	}

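	/*
	 * The FSR WnR bit distinguishes a faulting write from a read;
	 * a prefetch abort is by definition an instruction fetch.
	 */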
	ftype = (fsr & FSR_WNR) ? VM_PROT_WRITE : VM_PROT_READ;
	if (prefetch)
		ftype |= VM_PROT_EXECUTE;

#ifdef DEBUG
	last_fault_code = fsr;
#endif

#ifdef INVARIANTS
	onfault = pcb->pcb_onfault;
	pcb->pcb_onfault = NULL;
#endif

	/* Fault in the page. */
	rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

#ifdef INVARIANTS
	pcb->pcb_onfault = onfault;
#endif

	if (__predict_true(rv == KERN_SUCCESS))
		goto out;
nogo:
	if (!usermode) {
		if (td->td_intr_nesting_level == 0 &&
		    pcb->pcb_onfault != NULL) {
			tf->tf_r0 = rv;
			tf->tf_pc = (register_t)pcb->pcb_onfault;
			return;
		}
		CTR2(KTR_TRAP, "%s: vm_fault() failed with %d", __func__, rv);
		abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
		return;
	}

	ksig.sig = SIGSEGV;
	ksig.code = (rv == KERN_PROTECTION_FAILURE) ? SEGV_ACCERR : SEGV_MAPERR;
	ksig.addr = far;

do_trapsignal:
	call_trapsignal(td, ksig.sig, ksig.code, ksig.addr);
out:
	if (usermode)
		userret(td, tf);
}

/*
 * abort_fatal() handles the following data aborts:
 *
 *  FAULT_DEBUG		- Debug Event
 *  FAULT_ACCESS_xx	- Access Bit
 *  FAULT_EA_PREC	- Precise External Abort
 *  FAULT_DOMAIN_xx	- Domain Fault
 *  FAULT_EA_TRAN_xx	- External Translation Abort
 *  FAULT_EA_IMPREC	- Imprecise External Abort
 *  + all undefined codes for ABORT
 *
 * We should never see these on a properly functioning system.
 *
 * This function is also called by the other handlers if they
 * detect a fatal problem.
 *
 * Note: If 'td' is NULL, we assume we're dealing with a prefetch abort.
 */
static int
abort_fatal(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;
	const char *mode;
	const char *rw_mode;

	usermode = TRAPF_USERMODE(tf);
#ifdef KDTRACE_HOOKS
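	/*
	 * Give DTrace a chance to claim a kernel-mode fault (e.g. one
	 * induced by one of its probes); if its trap handler takes the
	 * fault, return without panicking.
	 */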
	if (!usermode) {
		if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far))
			return (0);
	}
#endif

	mode = usermode ? "user" : "kernel";
	rw_mode = (fsr & FSR_WNR) ? "write" : "read";
	disable_interrupts(PSR_I|PSR_F);

	if (td != NULL) {
		printf("Fatal %s mode data abort: '%s' on %s\n", mode,
		    aborts[idx].desc, rw_mode);
		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
		if (idx != FAULT_EA_IMPREC)
			printf("%08x, ", far);
		else
			printf("Invalid,  ");
		printf("spsr=%08x\n", tf->tf_spsr);
	} else {
		printf("Fatal %s mode prefetch abort at 0x%08x\n",
		    mode, tf->tf_pc);
		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
	}

	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
	    tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
	    tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
	    tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
	printf("r12=%08x, ", tf->tf_r12);

	if (usermode)
		printf("usp=%08x, ulr=%08x",
		    tf->tf_usr_sp, tf->tf_usr_lr);
	else
		printf("ssp=%08x, slr=%08x",
		    tf->tf_svc_sp, tf->tf_svc_lr);
	printf(", pc =%08x\n\n", tf->tf_pc);

#ifdef KDB
	if (debugger_on_panic || kdb_active)
		kdb_trap(fsr, 0, tf);
#endif
	panic("Fatal abort");
	/*NOTREACHED*/
}

/*
 * abort_align() handles the following data abort:
 *
 *  FAULT_ALIGN - Alignment fault
 *
 * Everything should be aligned in the kernel with the exception of user
 * to kernel and vice versa data copying, so if pcb_onfault is not set,
 * it's fatal. We generate a signal in case of an abort from user mode.
 */
static int
abort_align(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;

	usermode = TRAPF_USERMODE(tf);
	if (!usermode) {
		if (td != NULL && td->td_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault != NULL) {
			tf->tf_r0 = EFAULT;
			tf->tf_pc = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
	}
	/* Deliver a bus error signal to the process */
	ksig->code = BUS_ADRALN;
	ksig->sig = SIGBUS;
	ksig->addr = far;
	return (1);
}

/*
 * abort_icache() handles the following data abort:
 *
 * FAULT_ICACHE - Instruction cache maintenance
 *
 * According to the manual, FAULT_ICACHE is a translation fault during a
 * cache maintenance operation. In fact, no cache maintenance operation
 * on unmapped virtual addresses should be called. As cache maintenance
 * operations (except DMB, DSB, and Flush Prefetch Buffer) are privileged,
 * the abort is considered fatal for now. However, the whole matter of
 * cache maintenance operations on virtual addresses could be really
 * complex and fuzzy in the SMP case, so maybe in the future the standard
 * fault mechanism should be used here, including calling vm_fault().
 */
static int
abort_icache(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{

	abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
	return (0);
}
648