1 /*-
2 * Copyright 2014 Olivier Houchard <cognet@FreeBSD.org>
3 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
4 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
5 * Copyright 2014 Andrew Turner <andrew@FreeBSD.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include "opt_ktrace.h"
31
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/systm.h>
35 #include <sys/proc.h>
36 #include <sys/kernel.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/signalvar.h>
40 #include <sys/ktr.h>
41 #include <sys/vmmeter.h>
42 #ifdef KTRACE
43 #include <sys/uio.h>
44 #include <sys/ktrace.h>
45 #endif
46
47 #include <vm/vm.h>
48 #include <vm/pmap.h>
49 #include <vm/vm_kern.h>
50 #include <vm/vm_map.h>
51 #include <vm/vm_extern.h>
52 #include <vm/vm_param.h>
53
54 #include <machine/cpu.h>
55 #include <machine/frame.h>
56 #include <machine/machdep.h>
57 #include <machine/pcb.h>
58
59 #ifdef KDB
60 #include <sys/kdb.h>
61 #include <machine/db_machdep.h>
62 #endif
63
64 #ifdef KDTRACE_HOOKS
65 #include <sys/dtrace_bsd.h>
66 #endif
67
/* Label inside copy(9) cache-maintenance code; see abort_handler(). */
extern char cachebailout[];

/*
 * Signal description collected by an abort handler and delivered to the
 * faulting thread by abort_handler() via call_trapsignal().
 */
struct ksig {
	int sig;		/* signal number (SIGSEGV, SIGBUS, ...) */
	u_long code;		/* signal code (SEGV_ACCERR, BUS_ADRALN, ...) */
	vm_offset_t addr;	/* faulting virtual address */
};

/*
 * Handler for one fault class.  A non-zero return value means a signal
 * described by *ksig should be delivered to the thread.
 */
typedef int abort_func_t(struct trapframe *, u_int, u_int, u_int, u_int,
    struct thread *, struct ksig *);

static abort_func_t abort_fatal;
static abort_func_t abort_align;
static abort_func_t abort_icache;

/* Entry of the fault dispatch table below. */
struct abort {
	abort_func_t *func;	/* handler; NULL => handled in abort_handler() */
	const char *desc;	/* human-readable fault description */
};
87
88 /*
89 * How are the aborts handled?
90 *
91 * Undefined Code:
 * - Always fatal as we do not know what it means.
93 * Imprecise External Abort:
94 * - Always fatal, but can be handled somehow in the future.
95 * Now, due to PCIe buggy hardware, ignored.
96 * Precise External Abort:
97 * - Always fatal, but who knows in the future???
98 * Debug Event:
99 * - Special handling.
100 * External Translation Abort (L1 & L2)
101 * - Always fatal as something is screwed up in page tables or hardware.
102 * Domain Fault (L1 & L2):
103 * - Always fatal as we do not play game with domains.
104 * Alignment Fault:
105 * - Everything should be aligned in kernel with exception of user to kernel
106 * and vice versa data copying, so if pcb_onfault is not set, it's fatal.
107 * We generate signal in case of abort from user mode.
108 * Instruction cache maintenance:
109 * - According to manual, this is translation fault during cache maintenance
110 * operation. So, it could be really complex in SMP case and fuzzy too
111 * for cache operations working on virtual addresses. For now, we will
112 * consider this abort as fatal. In fact, no cache maintenance on
113 * not mapped virtual addresses should be called. As cache maintenance
114 * operation (except DMB, DSB, and Flush Prefetch Buffer) are privileged,
115 * the abort is fatal for user mode as well for now. (This is good place to
116 * note that cache maintenance on virtual address fill TLB.)
 * Access Bit (L1 & L2):
118 * - Fast hardware emulation for kernel and user mode.
119 * Translation Fault (L1 & L2):
120 * - Standard fault mechanism is held including vm_fault().
121 * Permission Fault (L1 & L2):
122 * - Fast hardware emulation of modify bits and in other cases, standard
123 * fault mechanism is held including vm_fault().
124 */
125
/*
 * Fault dispatch table, indexed by FSR_TO_FAULT(fsr).  Entries with a
 * NULL handler are processed directly in abort_handler() (hardware
 * access/modify-bit emulation, vm_fault(), I-cache maintenance).
 */
static const struct abort aborts[] = {
	{abort_fatal, "Undefined Code (0x000)"},
	{abort_align, "Alignment Fault"},
	{abort_fatal, "Debug Event"},
	{NULL, "Access Bit (L1)"},
	{NULL, "Instruction cache maintenance"},
	{NULL, "Translation Fault (L1)"},
	{NULL, "Access Bit (L2)"},
	{NULL, "Translation Fault (L2)"},

	{abort_fatal, "External Abort"},
	{abort_fatal, "Domain Fault (L1)"},
	{abort_fatal, "Undefined Code (0x00A)"},
	{abort_fatal, "Domain Fault (L2)"},
	{abort_fatal, "External Translation Abort (L1)"},
	{NULL, "Permission Fault (L1)"},
	{abort_fatal, "External Translation Abort (L2)"},
	{NULL, "Permission Fault (L2)"},

	{abort_fatal, "TLB Conflict Abort"},
	{abort_fatal, "Undefined Code (0x401)"},
	{abort_fatal, "Undefined Code (0x402)"},
	{abort_fatal, "Undefined Code (0x403)"},
	{abort_fatal, "Undefined Code (0x404)"},
	{abort_fatal, "Undefined Code (0x405)"},
	{abort_fatal, "Asynchronous External Abort"},
	{abort_fatal, "Undefined Code (0x407)"},

	{abort_fatal, "Asynchronous Parity Error on Memory Access"},
	{abort_fatal, "Parity Error on Memory Access"},
	{abort_fatal, "Undefined Code (0x40A)"},
	{abort_fatal, "Undefined Code (0x40B)"},
	{abort_fatal, "Parity Error on Translation (L1)"},
	{abort_fatal, "Undefined Code (0x40D)"},
	{abort_fatal, "Parity Error on Translation (L2)"},
	{abort_fatal, "Undefined Code (0x40F)"}
};
163
164 static __inline void
call_trapsignal(struct thread * td,int sig,int code,vm_offset_t addr,int trapno)165 call_trapsignal(struct thread *td, int sig, int code, vm_offset_t addr,
166 int trapno)
167 {
168 ksiginfo_t ksi;
169
170 CTR4(KTR_TRAP, "%s: addr: %#x, sig: %d, code: %d",
171 __func__, addr, sig, code);
172
173 /*
174 * TODO: some info would be nice to know
175 * if we are serving data or prefetch abort.
176 */
177
178 ksiginfo_init_trap(&ksi);
179 ksi.ksi_signo = sig;
180 ksi.ksi_code = code;
181 ksi.ksi_addr = (void *)addr;
182 ksi.ksi_trapno = trapno;
183 trapsignal(td, &ksi);
184 }
185
186 /*
187 * abort_imprecise() handles the following abort:
188 *
189 * FAULT_EA_IMPREC - Imprecise External Abort
190 *
191 * The imprecise means that we don't know where the abort happened,
 * thus FAR is undefined. The abort should never fire, but hot
193 * plugging or accidental hardware failure can be the cause of it.
194 * If the abort happens, it can even be on different (thread) context.
195 * Without any additional support, the abort is fatal, as we do not
196 * know what really happened.
197 *
198 * QQQ: Some additional functionality, like pcb_onfault but global,
199 * can be implemented. Imprecise handlers could be registered
200 * which tell us if the abort is caused by something they know
201 * about. They should return one of three codes like:
202 * FAULT_IS_MINE,
203 * FAULT_CAN_BE_MINE,
204 * FAULT_IS_NOT_MINE.
205 * The handlers should be called until some of them returns
206 * FAULT_IS_MINE value or all was called. If all handlers return
207 * FAULT_IS_NOT_MINE value, then the abort is fatal.
208 */
static __inline void
abort_imprecise(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode)
{

	/*
	 * XXX - We can get an imprecise abort as a result of an access
	 * to not-present PCI/PCIe configuration space.
	 */
#if 0
	goto out;
#endif
	/* No registered handlers yet (see QQQ above); always fatal. */
	abort_fatal(tf, FAULT_EA_IMPREC, fsr, 0, prefetch, curthread, NULL);

	/*
	 * Returning from this function means that we ignore
	 * the abort for good reason. Note that imprecise abort
	 * could fire any time even in user mode.
	 */

#if 0
out:
	if (usermode)
		userret(curthread, tf);
#endif
}
234
235 /*
236 * abort_debug() handles the following abort:
237 *
238 * FAULT_DEBUG - Debug Event
239 *
240 */
241 static __inline void
abort_debug(struct trapframe * tf,u_int fsr,u_int prefetch,bool usermode,u_int far)242 abort_debug(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode,
243 u_int far)
244 {
245
246 if (usermode) {
247 struct thread *td;
248
249 td = curthread;
250 call_trapsignal(td, SIGTRAP, TRAP_BRKPT, far, FAULT_DEBUG);
251 userret(td, tf);
252 } else {
253 #ifdef KDB
254 kdb_trap((prefetch) ? T_BREAKPOINT : T_WATCHPOINT, 0, tf);
255 #else
256 printf("No debugger in kernel.\n");
257 #endif
258 }
259 }
260
261 /*
262 * Abort handler.
263 *
264 * FAR, FSR, and everything what can be lost after enabling
265 * interrupts must be grabbed before the interrupts will be
266 * enabled. Note that when interrupts will be enabled, we
267 * could even migrate to another CPU ...
268 *
269 * TODO: move quick cases to ASM
270 */
271 void
abort_handler(struct trapframe * tf,int prefetch)272 abort_handler(struct trapframe *tf, int prefetch)
273 {
274 struct thread *td;
275 vm_offset_t far, va;
276 int idx, rv;
277 uint32_t fsr;
278 struct ksig ksig;
279 struct proc *p;
280 struct pcb *pcb;
281 struct vm_map *map;
282 struct vmspace *vm;
283 vm_prot_t ftype;
284 bool usermode;
285 int bp_harden, ucode;
286 #ifdef INVARIANTS
287 void *onfault;
288 #endif
289
290 VM_CNT_INC(v_trap);
291 td = curthread;
292
293 fsr = (prefetch) ? cp15_ifsr_get(): cp15_dfsr_get();
294 #if __ARM_ARCH >= 7
295 far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get();
296 #else
297 far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get();
298 #endif
299
300 idx = FSR_TO_FAULT(fsr);
301 usermode = TRAPF_USERMODE(tf); /* Abort came from user mode? */
302
303 /*
304 * Apply BP hardening by flushing the branch prediction cache
305 * for prefaults on kernel addresses.
306 */
307 if (__predict_false(prefetch && far > VM_MAXUSER_ADDRESS &&
308 (idx == FAULT_TRAN_L2 || idx == FAULT_PERM_L2))) {
309 bp_harden = PCPU_GET(bp_harden_kind);
310 if (bp_harden == PCPU_BP_HARDEN_KIND_BPIALL)
311 _CP15_BPIALL();
312 else if (bp_harden == PCPU_BP_HARDEN_KIND_ICIALLU)
313 _CP15_ICIALLU();
314 }
315
316 if (usermode)
317 td->td_frame = tf;
318
319 CTR6(KTR_TRAP, "%s: fsr %#x (idx %u) far %#x prefetch %u usermode %d",
320 __func__, fsr, idx, far, prefetch, usermode);
321
322 /*
323 * Firstly, handle aborts that are not directly related to mapping.
324 */
325 if (__predict_false(idx == FAULT_EA_IMPREC)) {
326 abort_imprecise(tf, fsr, prefetch, usermode);
327 return;
328 }
329
330 if (__predict_false(idx == FAULT_DEBUG)) {
331 abort_debug(tf, fsr, prefetch, usermode, far);
332 return;
333 }
334
335 /*
336 * ARM has a set of unprivileged load and store instructions
337 * (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used in other
338 * than user mode and OS should recognize their aborts and behave
339 * appropriately. However, there is no way how to do that reasonably
340 * in general unless we restrict the handling somehow.
341 *
342 * For now, these instructions are used only in copyin()/copyout()
343 * like functions where usermode buffers are checked in advance that
344 * they are not from KVA space. Thus, no action is needed here.
345 */
346
347 /*
348 * (1) Handle access and R/W hardware emulation aborts.
349 * (2) Check that abort is not on pmap essential address ranges.
350 * There is no way how to fix it, so we don't even try.
351 */
352 rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode);
353 if (rv == KERN_SUCCESS)
354 return;
355 #ifdef KDB
356 if (kdb_active) {
357 kdb_reenter();
358 goto out;
359 }
360 #endif
361 if (rv == KERN_INVALID_ADDRESS)
362 goto nogo;
363
364 if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
365 /*
366 * Due to both processor errata and lazy TLB invalidation when
367 * access restrictions are removed from virtual pages, memory
368 * accesses that are allowed by the physical mapping layer may
369 * nonetheless cause one spurious page fault per virtual page.
370 * When the thread is executing a "no faulting" section that
371 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
372 * every page fault is treated as a spurious page fault,
373 * unless it accesses the same virtual address as the most
374 * recent page fault within the same "no faulting" section.
375 */
376 if (td->td_md.md_spurflt_addr != far ||
377 (td->td_pflags & TDP_RESETSPUR) != 0) {
378 td->td_md.md_spurflt_addr = far;
379 td->td_pflags &= ~TDP_RESETSPUR;
380
381 tlb_flush_local(far & ~PAGE_MASK);
382 return;
383 }
384 } else {
385 /*
386 * If we get a page fault while in a critical section, then
387 * it is most likely a fatal kernel page fault. The kernel
388 * is already going to panic trying to get a sleep lock to
389 * do the VM lookup, so just consider it a fatal trap so the
390 * kernel can print out a useful trap message and even get
391 * to the debugger.
392 *
393 * If we get a page fault while holding a non-sleepable
394 * lock, then it is most likely a fatal kernel page fault.
395 * If WITNESS is enabled, then it's going to whine about
396 * bogus LORs with various VM locks, so just skip to the
397 * fatal trap handling directly.
398 */
399 if (td->td_critnest != 0 ||
400 WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
401 "Kernel page fault") != 0) {
402 abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
403 return;
404 }
405 }
406
407 /* Re-enable interrupts if they were enabled previously. */
408 if (td->td_md.md_spinlock_count == 0) {
409 if (__predict_true(tf->tf_spsr & PSR_I) == 0)
410 enable_interrupts(PSR_I);
411 if (__predict_true(tf->tf_spsr & PSR_F) == 0)
412 enable_interrupts(PSR_F);
413 }
414
415 p = td->td_proc;
416 if (usermode) {
417 td->td_pticks = 0;
418 if (td->td_cowgen != atomic_load_int(&p->p_cowgen))
419 thread_cow_update(td);
420 }
421
422 /* Invoke the appropriate handler, if necessary. */
423 if (__predict_false(aborts[idx].func != NULL)) {
424 if ((aborts[idx].func)(tf, idx, fsr, far, prefetch, td, &ksig))
425 goto do_trapsignal;
426 goto out;
427 }
428
429 /*
430 * At this point, we're dealing with one of the following aborts:
431 *
432 * FAULT_ICACHE - I-cache maintenance
433 * FAULT_TRAN_xx - Translation
434 * FAULT_PERM_xx - Permission
435 */
436
437 /*
438 * Don't pass faulting cache operation to vm_fault(). We don't want
439 * to handle all vm stuff at this moment.
440 */
441 pcb = td->td_pcb;
442 if (__predict_false(pcb->pcb_onfault == cachebailout)) {
443 tf->tf_r0 = far; /* return failing address */
444 tf->tf_pc = (register_t)pcb->pcb_onfault;
445 return;
446 }
447
448 /* Handle remaining I-cache aborts. */
449 if (idx == FAULT_ICACHE) {
450 if (abort_icache(tf, idx, fsr, far, prefetch, td, &ksig))
451 goto do_trapsignal;
452 goto out;
453 }
454
455 va = trunc_page(far);
456 if (va >= KERNBASE) {
457 /*
458 * Don't allow user-mode faults in kernel address space.
459 */
460 if (usermode) {
461 ksig.sig = SIGSEGV;
462 ksig.code = SEGV_ACCERR;
463 goto nogo;
464 }
465
466 map = kernel_map;
467 } else {
468 /*
469 * This is a fault on non-kernel virtual memory. If curproc
470 * is NULL or curproc->p_vmspace is NULL the fault is fatal.
471 */
472 vm = (p != NULL) ? p->p_vmspace : NULL;
473 if (vm == NULL) {
474 ksig.sig = SIGSEGV;
475 ksig.code = 0;
476 goto nogo;
477 }
478
479 map = &vm->vm_map;
480 if (!usermode && (td->td_intr_nesting_level != 0 ||
481 pcb->pcb_onfault == NULL)) {
482 abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
483 return;
484 }
485 }
486
487 ftype = (fsr & FSR_WNR) ? VM_PROT_WRITE : VM_PROT_READ;
488 if (prefetch)
489 ftype |= VM_PROT_EXECUTE;
490
491 #ifdef INVARIANTS
492 onfault = pcb->pcb_onfault;
493 pcb->pcb_onfault = NULL;
494 #endif
495
496 /* Fault in the page. */
497 rv = vm_fault_trap(map, va, ftype, VM_FAULT_NORMAL, &ksig.sig,
498 &ucode);
499 ksig.code = ucode;
500
501 #ifdef INVARIANTS
502 pcb->pcb_onfault = onfault;
503 #endif
504
505 if (__predict_true(rv == KERN_SUCCESS))
506 goto out;
507 nogo:
508 if (!usermode) {
509 if (td->td_intr_nesting_level == 0 &&
510 pcb->pcb_onfault != NULL) {
511 tf->tf_r0 = rv;
512 tf->tf_pc = (int)pcb->pcb_onfault;
513 return;
514 }
515 CTR2(KTR_TRAP, "%s: vm_fault() failed with %d", __func__, rv);
516 abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
517 return;
518 }
519
520 ksig.addr = far;
521
522 do_trapsignal:
523 call_trapsignal(td, ksig.sig, ksig.code, ksig.addr, idx);
524 out:
525 if (usermode)
526 userret(td, tf);
527 }
528
529 /*
530 * abort_fatal() handles the following data aborts:
531 *
532 * FAULT_DEBUG - Debug Event
 * FAULT_ACCESS_xx - Access Bit
534 * FAULT_EA_PREC - Precise External Abort
535 * FAULT_DOMAIN_xx - Domain Fault
536 * FAULT_EA_TRAN_xx - External Translation Abort
537 * FAULT_EA_IMPREC - Imprecise External Abort
538 * + all undefined codes for ABORT
539 *
540 * We should never see these on a properly functioning system.
541 *
542 * This function is also called by the other handlers if they
543 * detect a fatal problem.
544 *
 * Note: If 'td' is NULL, we assume we're dealing with a prefetch abort.
546 */
static int
abort_fatal(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;
	const char *mode;
	const char *rw_mode;
#ifdef KDB
	bool handled;
#endif

	usermode = TRAPF_USERMODE(tf);
#ifdef KDTRACE_HOOKS
	/* Give DTrace a chance to claim a kernel-mode fault it induced. */
	if (!usermode) {
		if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far))
			return (0);
	}
#endif

	mode = usermode ? "user" : "kernel";
	rw_mode = fsr & FSR_WNR ? "write" : "read";
	/* Going down; keep the diagnostic dump from being interleaved. */
	disable_interrupts(PSR_I|PSR_F);

	/* td == NULL means a prefetch abort; see the header comment. */
	if (td != NULL) {
		printf("Fatal %s mode data abort: '%s' on %s\n", mode,
		    aborts[idx].desc, rw_mode);
		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
		/* FAR is undefined for an imprecise external abort. */
		if (idx != FAULT_EA_IMPREC)
			printf("%08x, ", far);
		else
			printf("Invalid, ");
		printf("spsr=%08x\n", tf->tf_spsr);
	} else {
		printf("Fatal %s mode prefetch abort at 0x%08x\n",
		    mode, tf->tf_pc);
		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
	}

	/* Dump the general-purpose registers from the trapframe. */
	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
	    tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
	    tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
	    tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
	printf("r12=%08x, ", tf->tf_r12);

	/* SP/LR are banked; show the set belonging to the aborted mode. */
	if (usermode)
		printf("usp=%08x, ulr=%08x",
		    tf->tf_usr_sp, tf->tf_usr_lr);
	else
		printf("ssp=%08x, slr=%08x",
		    tf->tf_svc_sp, tf->tf_svc_lr);
	printf(", pc =%08x\n\n", tf->tf_pc);

#ifdef KDB
	/* Enter the debugger first, if configured; it may recover. */
	if (debugger_on_trap) {
		kdb_why = KDB_WHY_TRAP;
		handled = kdb_trap(fsr, 0, tf);
		kdb_why = KDB_WHY_UNSET;
		if (handled)
			return (0);
	}
#endif
	panic("Fatal abort");
	/*NOTREACHED*/
}
613
614 /*
615 * abort_align() handles the following data abort:
616 *
617 * FAULT_ALIGN - Alignment fault
618 *
619 * Everything should be aligned in kernel with exception of user to kernel
620 * and vice versa data copying, so if pcb_onfault is not set, it's fatal.
621 * We generate signal in case of abort from user mode.
622 */
623 static int
abort_align(struct trapframe * tf,u_int idx,u_int fsr,u_int far,u_int prefetch,struct thread * td,struct ksig * ksig)624 abort_align(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
625 u_int prefetch, struct thread *td, struct ksig *ksig)
626 {
627 bool usermode;
628
629 usermode = TRAPF_USERMODE(tf);
630 if (!usermode) {
631 if (td->td_intr_nesting_level == 0 && td != NULL &&
632 td->td_pcb->pcb_onfault != NULL) {
633 tf->tf_r0 = EFAULT;
634 tf->tf_pc = (int)td->td_pcb->pcb_onfault;
635 return (0);
636 }
637 abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
638 }
639 /* Deliver a bus error signal to the process */
640 ksig->code = BUS_ADRALN;
641 ksig->sig = SIGBUS;
642 ksig->addr = far;
643 return (1);
644 }
645
646 /*
647 * abort_icache() handles the following data abort:
648 *
649 * FAULT_ICACHE - Instruction cache maintenance
650 *
651 * According to manual, FAULT_ICACHE is translation fault during cache
652 * maintenance operation. In fact, no cache maintenance operation on
653 * not mapped virtual addresses should be called. As cache maintenance
654 * operation (except DMB, DSB, and Flush Prefetch Buffer) are privileged,
 * the abort is considered fatal for now. However, all the matter with
656 * cache maintenance operation on virtual addresses could be really complex
657 * and fuzzy in SMP case, so maybe in future standard fault mechanism
658 * should be held here including vm_fault() calling.
659 */
660 static int
abort_icache(struct trapframe * tf,u_int idx,u_int fsr,u_int far,u_int prefetch,struct thread * td,struct ksig * ksig)661 abort_icache(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
662 u_int prefetch, struct thread *td, struct ksig *ksig)
663 {
664
665 abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
666 return(0);
667 }
668