/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * AMD64 Trap and System call handling
 */

#include "opt_clock.h"
#include "opt_cpu.h"
#include "opt_hwpmc_hooks.h"
#include "opt_isa.h"
#include "opt_kdb.h"

#include <sys/param.h>
#include <sys/asan.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , page_fault, all);
PMC_SOFT_DEFINE( , , page_fault, read);
PMC_SOFT_DEFINE( , , page_fault, write);
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/stack.h>
#include <machine/trap.h>
#include <machine/tss.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
#endif

extern inthand_t IDTVEC(bpt), IDTVEC(bpt_pti), IDTVEC(dbg),
    IDTVEC(fast_syscall), IDTVEC(fast_syscall_pti), IDTVEC(fast_syscall32),
    IDTVEC(int0x80_syscall_pti), IDTVEC(int0x80_syscall);

void __noinline trap(struct trapframe *frame);
void trap_check(struct trapframe *frame);
void dblfault_handler(struct trapframe *frame);

static int trap_pfault(struct trapframe *, bool, int *, int *);
static void trap_fatal(struct trapframe *, vm_offset_t);
#ifdef KDTRACE_HOOKS
static bool trap_user_dtrace(struct trapframe *,
    int (**hook)(struct trapframe *));
#endif

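/*
 * Human-readable trap names, indexed by the T_* trap numbers from
 * <machine/trap.h>.  trap() and trap_fatal() print from this table;
 * the UNKNOWN entries fill holes for vectors that are never reported.
 */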
static const char UNKNOWN[] = "unknown";
static const char *const trap_msg[] = {
        [0] = UNKNOWN,                  /* unused */
        [T_PRIVINFLT] = "privileged instruction fault",
        [2] = UNKNOWN,                  /* unused */
        [T_BPTFLT] = "breakpoint instruction fault",
        [4] = UNKNOWN,                  /* unused */
        [5] = UNKNOWN,                  /* unused */
        [T_ARITHTRAP] = "arithmetic trap",
        [7] = UNKNOWN,                  /* unused */
        [8] = UNKNOWN,                  /* unused */
        [T_PROTFLT] = "general protection fault",
        [T_TRCTRAP] = "debug exception",
        [11] = UNKNOWN,                 /* unused */
        [T_PAGEFLT] = "page fault",
        [13] = UNKNOWN,                 /* unused */
        [T_ALIGNFLT] = "alignment fault",
        [15] = UNKNOWN,                 /* unused */
        [16] = UNKNOWN,                 /* unused */
        [17] = UNKNOWN,                 /* unused */
        [T_DIVIDE] = "integer divide fault",
        [T_NMI] = "non-maskable interrupt trap",
        [T_OFLOW] = "overflow trap",
        [T_BOUND] = "FPU bounds check fault",
        [T_DNA] = "FPU device not available",
        [T_DOUBLEFLT] = "double fault",
        [T_FPOPFLT] = "FPU operand fetch fault",
        [T_TSSFLT] = "invalid TSS fault",
        [T_SEGNPFLT] = "segment not present fault",
        [T_STKFLT] = "stack fault",
        [T_MCHK] = "machine check trap",
        [T_XMMFLT] = "SIMD floating-point exception",
        [T_RESERVED] = "reserved (unknown) fault",
        [31] = UNKNOWN,                 /* reserved */
        [T_DTRACE_RET] = "DTrace pid return trap",
};

static int uprintf_signal;
SYSCTL_INT(_machdep, OID_AUTO, uprintf_signal, CTLFLAG_RWTUN,
    &uprintf_signal, 0,
    "Print debugging information on trap signal to ctty");

/*
 * Control L1D flush on return from NMI.
 *
 * Tunable can be set to the following values:
 * 0 - only enable flush on return from NMI if required by vmm.ko (default)
 * non-zero - always flush on return from NMI.
 *
 * Post-boot, the sysctl indicates if flushing is currently enabled.
 */
int nmi_flush_l1d_sw;
SYSCTL_INT(_machdep, OID_AUTO, nmi_flush_l1d_sw, CTLFLAG_RWTUN,
    &nmi_flush_l1d_sw, 0,
    "Flush L1 Data Cache on NMI exit, software bhyve L1TF mitigation assist");

/*
 * Table of handlers for various segment load faults.
 */
static const struct {
        uintptr_t faddr;
        uintptr_t fhandler;
} sfhandlers[] = {
        {
                .faddr = (uintptr_t)ld_ds,
                .fhandler = (uintptr_t)ds_load_fault,
        },
        {
                .faddr = (uintptr_t)ld_es,
                .fhandler = (uintptr_t)es_load_fault,
        },
        {
                .faddr = (uintptr_t)ld_fs,
                .fhandler = (uintptr_t)fs_load_fault,
        },
        {
                .faddr = (uintptr_t)ld_gs,
                .fhandler = (uintptr_t)gs_load_fault,
        },
        {
                .faddr = (uintptr_t)ld_gsbase,
                .fhandler = (uintptr_t)gsbase_load_fault,
        },
        {
                .faddr = (uintptr_t)ld_fsbase,
                .fhandler = (uintptr_t)fsbase_load_fault,
        },
};
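
/*
 * trap() consults this table when a segment-register load in the
 * kernel exit path faults: if the faulting %rip matches one of the
 * ld_* labels, %rip is redirected to the paired recovery handler
 * instead of panicking (see the T_PROTFLT/T_SEGNPFLT handling below).
 */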

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */

void
trap(struct trapframe *frame)
{
        ksiginfo_t ksi;
        struct thread *td;
        struct proc *p;
        register_t addr, dr6;
        size_t i;
        int pf, signo, ucode;
        u_int type;

        td = curthread;
        p = td->td_proc;
        dr6 = 0;

        kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
        kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);

        VM_CNT_INC(v_trap);
        type = frame->tf_trapno;

#ifdef SMP
        /* Handler for NMI IPIs used for stopping CPUs. */
        if (type == T_NMI && ipi_nmi_handler() == 0)
                return;
#endif

#ifdef KDB
        if (kdb_active) {
                kdb_reenter();
                return;
        }
#endif

        if (type == T_RESERVED) {
                trap_fatal(frame, 0);
                return;
        }

        if (type == T_NMI) {
#ifdef HWPMC_HOOKS
                /*
                 * CPU PMCs interrupt using an NMI.  If the PMC module is
                 * active, pass the 'rip' value to the PMC module's
                 * interrupt handler.  A non-zero return value from the
                 * handler means that the NMI was consumed by it and we
                 * can return immediately.
                 */
                if (pmc_intr != NULL &&
                    (*pmc_intr)(frame) != 0)
                        return;
#endif
        }

        if ((frame->tf_rflags & PSL_I) == 0) {
                /*
                 * Buggy application or kernel code has disabled
                 * interrupts and then trapped.  Enabling interrupts
                 * now is wrong, but it is better than running with
                 * interrupts disabled until they are accidentally
                 * enabled later.
                 */
                if (TRAPF_USERMODE(frame)) {
                        uprintf(
                            "pid %ld (%s): trap %d (%s) "
                            "with interrupts disabled\n",
                            (long)curproc->p_pid, curthread->td_name, type,
                            trap_msg[type]);
                } else {
                        switch (type) {
                        case T_NMI:
                        case T_BPTFLT:
                        case T_TRCTRAP:
                        case T_PROTFLT:
                        case T_SEGNPFLT:
                        case T_STKFLT:
                                break;
                        default:
                                printf(
                                    "kernel trap %d with interrupts disabled\n",
                                    type);

                                /*
                                 * We shouldn't enable interrupts while
                                 * holding a spin lock.
                                 */
                                if (td->td_md.md_spinlock_count == 0)
                                        enable_intr();
                        }
                }
        }

        if (TRAPF_USERMODE(frame)) {
                /* user trap */

                td->td_pticks = 0;
                td->td_frame = frame;
                addr = frame->tf_rip;
                if (td->td_cowgen != atomic_load_int(&p->p_cowgen))
                        thread_cow_update(td);

                switch (type) {
                case T_PRIVINFLT:       /* privileged instruction fault */
                        signo = SIGILL;
                        ucode = ILL_PRVOPC;
                        break;

                case T_BPTFLT:          /* bpt instruction fault */
#ifdef KDTRACE_HOOKS
                        if (trap_user_dtrace(frame, &dtrace_pid_probe_ptr))
                                return;
#else
                        enable_intr();
#endif
                        signo = SIGTRAP;
                        ucode = TRAP_BRKPT;
                        break;

                case T_TRCTRAP:         /* debug exception */
                        enable_intr();
                        signo = SIGTRAP;
                        ucode = TRAP_TRACE;
                        dr6 = rdr6();
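                        /*
                         * DBREG_DR6_BS is the single-step bit.  If the
                         * step was requested by a debugger (TDB_STEP),
                         * clear PSL_T so that the thread does not trap
                         * again on the next instruction.
                         */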
                        if ((dr6 & DBREG_DR6_BS) != 0) {
                                PROC_LOCK(td->td_proc);
                                if ((td->td_dbgflags & TDB_STEP) != 0) {
                                        td->td_frame->tf_rflags &= ~PSL_T;
                                        td->td_dbgflags &= ~TDB_STEP;
                                }
                                PROC_UNLOCK(td->td_proc);
                        }
                        break;

                case T_ARITHTRAP:       /* arithmetic trap */
                        ucode = fputrap_x87();
                        if (ucode == -1)
                                return;
                        signo = SIGFPE;
                        break;

                case T_PROTFLT:         /* general protection fault */
                        signo = SIGBUS;
                        ucode = BUS_OBJERR;
                        break;
                case T_STKFLT:          /* stack fault */
                case T_SEGNPFLT:        /* segment not present fault */
                        signo = SIGBUS;
                        ucode = BUS_ADRERR;
                        break;
                case T_TSSFLT:          /* invalid TSS fault */
                        signo = SIGBUS;
                        ucode = BUS_OBJERR;
                        break;
                case T_ALIGNFLT:
                        signo = SIGBUS;
                        ucode = BUS_ADRALN;
                        break;
                case T_DOUBLEFLT:       /* double fault */
                default:
                        signo = SIGBUS;
                        ucode = BUS_OBJERR;
                        break;

                case T_PAGEFLT:         /* page fault */
                        /*
                         * Can emulator handle this trap?
                         */
                        if (*p->p_sysent->sv_trap != NULL &&
                            (*p->p_sysent->sv_trap)(td) == 0)
                                return;

                        pf = trap_pfault(frame, true, &signo, &ucode);
                        if (pf == -1)
                                return;
                        if (pf == 0)
                                goto userret;
                        addr = frame->tf_addr;
                        break;

                case T_DIVIDE:          /* integer divide fault */
                        ucode = FPE_INTDIV;
                        signo = SIGFPE;
                        break;

                case T_NMI:
                        nmi_handle_intr(type, frame);
                        return;

                case T_OFLOW:           /* integer overflow fault */
                        ucode = FPE_INTOVF;
                        signo = SIGFPE;
                        break;

                case T_BOUND:           /* bounds check fault */
                        ucode = FPE_FLTSUB;
                        signo = SIGFPE;
                        break;

                case T_DNA:
                        /* transparent fault (due to context switch "late") */
                        KASSERT(PCB_USER_FPU(td->td_pcb),
                            ("kernel FPU ctx has leaked"));
                        fpudna();
                        return;

                case T_FPOPFLT:         /* FPU operand fetch fault */
                        ucode = ILL_COPROC;
                        signo = SIGILL;
                        break;

                case T_XMMFLT:          /* SIMD floating-point exception */
                        ucode = fputrap_sse();
                        if (ucode == -1)
                                return;
                        signo = SIGFPE;
                        break;
#ifdef KDTRACE_HOOKS
                case T_DTRACE_RET:
                        (void)trap_user_dtrace(frame, &dtrace_return_probe_ptr);
                        return;
#endif
                }
        } else {
                /* kernel trap */

                KASSERT(cold || td->td_ucred != NULL,
                    ("kernel trap doesn't have ucred"));
                switch (type) {
                case T_PAGEFLT:         /* page fault */
                        (void)trap_pfault(frame, false, NULL, NULL);
                        return;

                case T_DNA:
                        if (PCB_USER_FPU(td->td_pcb))
                                panic("Unregistered use of FPU in kernel");
                        fpudna();
                        return;

                case T_ARITHTRAP:       /* arithmetic trap */
                case T_XMMFLT:          /* SIMD floating-point exception */
                case T_FPOPFLT:         /* FPU operand fetch fault */
                        /*
                         * For now, supporting kernel handler
                         * registration for FPU traps is overkill.
                         */
                        trap_fatal(frame, 0);
                        return;

                case T_STKFLT:          /* stack fault */
                case T_PROTFLT:         /* general protection fault */
                case T_SEGNPFLT:        /* segment not present fault */
                        if (td->td_intr_nesting_level != 0)
                                break;

                        /*
                         * Invalid segment selectors and out of bounds
                         * %rip's and %rsp's can be set up in user mode.
                         * This causes a fault in kernel mode when the
                         * kernel tries to return to user mode.  We want
                         * to get this fault so that we can fix the
                         * problem here and not have to check all the
                         * selectors and pointers when the user changes
                         * them.
                         *
                         * In case of PTI, the IRETQ faulted while the
                         * kernel used the pti stack, and exception
                         * frame records %rsp value pointing to that
                         * stack.  If we return normally to
                         * doreti_iret_fault, the trapframe is
                         * reconstructed on pti stack, and calltrap()
                         * called on it as well.  Due to the very
                         * limited pti stack size, kernel does not
                         * survive for too long.  Switch to the normal
                         * thread stack for the trap handling.
                         *
                         * Magic '5' is the number of qwords occupied by
                         * the hardware trap frame.
                         */
                        if (frame->tf_rip == (long)doreti_iret) {
                                KASSERT((read_rflags() & PSL_I) == 0,
                                    ("interrupts enabled"));
                                frame->tf_rip = (long)doreti_iret_fault;
                                if ((PCPU_GET(curpmap)->pm_ucr3 !=
                                    PMAP_NO_CR3) &&
                                    (frame->tf_rsp == (uintptr_t)PCPU_GET(
                                    pti_rsp0) - 5 * sizeof(register_t))) {
                                        frame->tf_rsp = PCPU_GET(rsp0) - 5 *
                                            sizeof(register_t);
                                }
                                return;
                        }

                        for (i = 0; i < nitems(sfhandlers); i++) {
                                if (frame->tf_rip == sfhandlers[i].faddr) {
                                        KASSERT((read_rflags() & PSL_I) == 0,
                                            ("interrupts enabled"));
                                        frame->tf_rip = sfhandlers[i].fhandler;
                                        return;
                                }
                        }

                        if (curpcb->pcb_onfault != NULL) {
                                frame->tf_rip = (long)curpcb->pcb_onfault;
                                return;
                        }
                        break;

                case T_TSSFLT:
                        /*
                         * PSL_NT can be set in user mode and isn't cleared
                         * automatically when the kernel is entered.  This
                         * causes a TSS fault when the kernel attempts to
                         * `iret' because the TSS link is uninitialized.  We
                         * want to get this fault so that we can fix the
                         * problem here and not every time the kernel is
                         * entered.
                         */
                        if (frame->tf_rflags & PSL_NT) {
                                frame->tf_rflags &= ~PSL_NT;
                                return;
                        }
                        break;

                case T_TRCTRAP:         /* debug exception */
                        /* Clear any pending debug events. */
                        dr6 = rdr6();
                        load_dr6(0);

                        /*
                         * Ignore debug register exceptions due to
                         * accesses in the user's address space, which
                         * can happen under several conditions such as
                         * if a user sets a watchpoint on a buffer and
                         * then passes that buffer to a system call.
                         * We still want to get TRCTRAPS for addresses
                         * in kernel space because that is useful when
                         * debugging the kernel.
                         */
                        if (user_dbreg_trap(dr6))
                                return;

                        /*
                         * Malicious user code can configure a debug
                         * register watchpoint to trap on data access
                         * to the top of stack and then execute 'pop
                         * %ss; int 3'.  Due to exception deferral for
                         * 'pop %ss', the CPU will not interrupt 'int
                         * 3' to raise the DB# exception for the debug
                         * register but will postpone the DB# until
                         * execution of the first instruction of the
                         * BP# handler (in kernel mode).  Normally the
                         * previous check would ignore DB# exceptions
                         * for watchpoints on user addresses raised in
                         * kernel mode.  However, some CPU errata
                         * include cases where DB# exceptions do not
                         * properly set bits in %dr6, e.g. Haswell
                         * HSD23 and Skylake-X SKZ24.
                         *
                         * A deferred DB# can also be raised on the
                         * first instructions of system call entry
                         * points or single-step traps via similar use
                         * of 'pop %ss' or 'mov xxx, %ss'.
                         */
                        if (pti) {
                                if (frame->tf_rip ==
                                    (uintptr_t)IDTVEC(fast_syscall_pti) ||
#ifdef COMPAT_FREEBSD32
                                    frame->tf_rip ==
                                    (uintptr_t)IDTVEC(int0x80_syscall_pti) ||
#endif
                                    frame->tf_rip == (uintptr_t)IDTVEC(bpt_pti))
                                        return;
                        } else {
                                if (frame->tf_rip ==
                                    (uintptr_t)IDTVEC(fast_syscall) ||
#ifdef COMPAT_FREEBSD32
                                    frame->tf_rip ==
                                    (uintptr_t)IDTVEC(int0x80_syscall) ||
#endif
                                    frame->tf_rip == (uintptr_t)IDTVEC(bpt))
                                        return;
                        }
                        if (frame->tf_rip == (uintptr_t)IDTVEC(dbg) ||
                            /* Needed for AMD. */
                            frame->tf_rip == (uintptr_t)IDTVEC(fast_syscall32))
                                return;
                        /*
                         * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
                         */
                case T_BPTFLT:
                        /*
                         * If KDB is enabled, let it handle the debugger trap.
                         * Otherwise, debugger traps "can't happen".
                         */
#ifdef KDB
                        if (kdb_trap(type, dr6, frame))
                                return;
#endif
                        break;

                case T_NMI:
                        nmi_handle_intr(type, frame);
                        return;
                }

                trap_fatal(frame, 0);
                return;
        }

        ksiginfo_init_trap(&ksi);
        ksi.ksi_signo = signo;
        ksi.ksi_code = ucode;
        ksi.ksi_trapno = type;
        ksi.ksi_addr = (void *)addr;
        if (uprintf_signal) {
                uprintf("pid %d comm %s: signal %d err %#lx code %d type %d "
                    "addr %#lx rsp %#lx rip %#lx rax %#lx "
                    "<%02x %02x %02x %02x %02x %02x %02x %02x>\n",
                    p->p_pid, p->p_comm, signo, frame->tf_err, ucode, type,
                    addr, frame->tf_rsp, frame->tf_rip, frame->tf_rax,
                    fubyte((void *)(frame->tf_rip + 0)),
                    fubyte((void *)(frame->tf_rip + 1)),
                    fubyte((void *)(frame->tf_rip + 2)),
                    fubyte((void *)(frame->tf_rip + 3)),
                    fubyte((void *)(frame->tf_rip + 4)),
                    fubyte((void *)(frame->tf_rip + 5)),
                    fubyte((void *)(frame->tf_rip + 6)),
                    fubyte((void *)(frame->tf_rip + 7)));
        }
        KASSERT((read_rflags() & PSL_I) != 0, ("interrupts disabled"));
        trapsignal(td, &ksi);

userret:
        userret(td, frame);
        KASSERT(PCB_USER_FPU(td->td_pcb),
            ("Return from trap with kernel FPU ctx leaked"));
}

/*
 * Ensure that we ignore any DTrace-induced faults.  This function cannot
 * be instrumented, so it cannot generate such faults itself.
 */
void
trap_check(struct trapframe *frame)
{

#ifdef KDTRACE_HOOKS
        if (dtrace_trap_func != NULL &&
            (*dtrace_trap_func)(frame, frame->tf_trapno) != 0)
                return;
#endif
        trap(frame);
}

static bool
trap_is_smap(struct trapframe *frame)
{

        /*
         * A page fault on a userspace address is classified as
         * SMAP-induced if:
         * - SMAP is supported;
         * - kernel mode accessed present data page;
         * - rflags.AC was cleared.
         * Kernel must never access user space with rflags.AC cleared
         * if SMAP is enabled.
         */
        return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 &&
            (frame->tf_err & (PGEX_P | PGEX_U | PGEX_I | PGEX_RSV)) ==
            PGEX_P && (frame->tf_rflags & PSL_AC) == 0);
}

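/*
 * Detect a page fault caused by the NX protection of the usermode
 * portion of the kernel page tables: an instruction fetch fault on a
 * present user page, taken while the pmap also has a PTI user page
 * table.  trap_pfault() panics when such a fault arrives from usermode.
 */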
static bool
trap_is_pti(struct trapframe *frame)
{

        return (PCPU_GET(curpmap)->pm_ucr3 != PMAP_NO_CR3 &&
            pg_nx != 0 && (frame->tf_err & (PGEX_P | PGEX_W |
            PGEX_U | PGEX_I)) == (PGEX_P | PGEX_U | PGEX_I) &&
            (curpcb->pcb_saved_ucr3 & ~CR3_PCID_MASK) ==
            (PCPU_GET(curpmap)->pm_cr3 & ~CR3_PCID_MASK));
}

/*
 * Handle all details of a page fault.
 * Returns:
 * -1 if this fault was fatal, typically from kernel mode
 *    (cannot happen, but we need to return something).
 * 0 if this fault was handled by updating either the user or kernel
 *    page table, execution can continue.
 * 1 if this fault was from usermode and it was not handled, a synchronous
 *    signal should be delivered to the thread.  *signo returns the signal
 *    number, *ucode gives si_code.
 */
static int
trap_pfault(struct trapframe *frame, bool usermode, int *signo, int *ucode)
{
        struct thread *td;
        struct proc *p;
        vm_map_t map;
        vm_offset_t eva;
        int rv;
        vm_prot_t ftype;

        MPASS(!usermode || (signo != NULL && ucode != NULL));

        td = curthread;
        p = td->td_proc;
        eva = frame->tf_addr;

        if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
                /*
                 * Due to both processor errata and lazy TLB invalidation when
                 * access restrictions are removed from virtual pages, memory
                 * accesses that are allowed by the physical mapping layer may
                 * nonetheless cause one spurious page fault per virtual page.
                 * When the thread is executing a "no faulting" section that
                 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
                 * every page fault is treated as a spurious page fault,
                 * unless it accesses the same virtual address as the most
                 * recent page fault within the same "no faulting" section.
                 */
                if (td->td_md.md_spurflt_addr != eva ||
                    (td->td_pflags & TDP_RESETSPUR) != 0) {
                        /*
                         * Do nothing to the TLB.  A stale TLB entry is
                         * flushed automatically by a page fault.
                         */
                        td->td_md.md_spurflt_addr = eva;
                        td->td_pflags &= ~TDP_RESETSPUR;
                        return (0);
                }
        } else {
                /*
                 * If we get a page fault while in a critical section, then
                 * it is most likely a fatal kernel page fault.  The kernel
                 * is already going to panic trying to get a sleep lock to
                 * do the VM lookup, so just consider it a fatal trap so the
                 * kernel can print out a useful trap message and even get
                 * to the debugger.
                 *
                 * If we get a page fault while holding a non-sleepable
                 * lock, then it is most likely a fatal kernel page fault.
                 * If WITNESS is enabled, then it's going to whine about
                 * bogus LORs with various VM locks, so just skip to the
                 * fatal trap handling directly.
                 */
                if (td->td_critnest != 0 ||
                    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
                    "Kernel page fault") != 0) {
                        trap_fatal(frame, eva);
                        return (-1);
                }
        }
        if (eva >= VM_MIN_KERNEL_ADDRESS) {
                /*
                 * Don't allow user-mode faults in kernel address space.
                 */
                if (usermode) {
                        *signo = SIGSEGV;
                        *ucode = SEGV_MAPERR;
                        return (1);
                }

                map = kernel_map;
        } else {
                map = &p->p_vmspace->vm_map;

                /*
                 * When accessing a usermode address, kernel must be
                 * ready to accept the page fault, and provide a
                 * handling routine.  Since accessing the address
                 * without the handler is a bug, do not try to handle
                 * it normally, and panic immediately.
                 *
                 * If SMAP is enabled, filter SMAP faults also,
                 * because illegal access might occur to the mapped
                 * user address, causing infinite loop.
                 */
                if (!usermode && (td->td_intr_nesting_level != 0 ||
                    trap_is_smap(frame) || curpcb->pcb_onfault == NULL)) {
                        trap_fatal(frame, eva);
                        return (-1);
                }
        }

        /*
         * If the trap was caused by errant bits in the PTE then panic.
         */
        if (frame->tf_err & PGEX_RSV) {
                trap_fatal(frame, eva);
                return (-1);
        }

        /*
         * User-mode protection key violation (PKU).  May happen
         * either from usermode or from kernel if copyin accessed
         * key-protected mapping.
         */
        if ((frame->tf_err & PGEX_PK) != 0) {
                if (eva > VM_MAXUSER_ADDRESS) {
                        trap_fatal(frame, eva);
                        return (-1);
                }
                if (usermode) {
                        *signo = SIGSEGV;
                        *ucode = SEGV_PKUERR;
                        return (1);
                }
                goto after_vmfault;
        }

        /*
         * If nx protection of the usermode portion of kernel page
         * tables caused trap, panic.
         */
        if (usermode && trap_is_pti(frame))
                panic("PTI: pid %d comm %s tf_err %#lx", p->p_pid,
                    p->p_comm, frame->tf_err);

        /*
         * PGEX_I is defined only if the execute disable bit capability is
         * supported and enabled.
         */
        if (frame->tf_err & PGEX_W)
                ftype = VM_PROT_WRITE;
        else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
                ftype = VM_PROT_EXECUTE;
        else
                ftype = VM_PROT_READ;

        /* Fault in the page. */
        rv = vm_fault_trap(map, eva, ftype, VM_FAULT_NORMAL, signo, ucode);
        if (rv == KERN_SUCCESS) {
#ifdef HWPMC_HOOKS
                if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
                        PMC_SOFT_CALL_TF( , , page_fault, all, frame);
                        if (ftype == VM_PROT_READ)
                                PMC_SOFT_CALL_TF( , , page_fault, read,
                                    frame);
                        else
                                PMC_SOFT_CALL_TF( , , page_fault, write,
                                    frame);
                }
#endif
                return (0);
        }

        if (usermode)
                return (1);
after_vmfault:
        if (td->td_intr_nesting_level == 0 &&
            curpcb->pcb_onfault != NULL) {
                frame->tf_rip = (long)curpcb->pcb_onfault;
                return (0);
        }
        trap_fatal(frame, eva);
        return (-1);
}

static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
        int code, ss;
        u_int type;
        struct soft_segment_descriptor softseg;
        struct user_segment_descriptor *gdt;
#ifdef KDB
        bool handled;
#endif

        code = frame->tf_err;
        type = frame->tf_trapno;
        gdt = *PCPU_PTR(gdt);
        sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

        printf("\n\nFatal trap %d: %s while in %s mode\n", type,
            type < nitems(trap_msg) ? trap_msg[type] : UNKNOWN,
            TRAPF_USERMODE(frame) ? "user" : "kernel");
#ifdef SMP
        /* two separate prints in case of a trap on an unmapped page */
        printf("cpuid = %d; ", PCPU_GET(cpuid));
        printf("apic id = %02x\n", PCPU_GET(apic_id));
#endif
        if (type == T_PAGEFLT) {
                printf("fault virtual address = 0x%lx\n", eva);
                printf("fault code = %s %s %s%s%s, %s\n",
                    code & PGEX_U ? "user" : "supervisor",
                    code & PGEX_W ? "write" : "read",
                    code & PGEX_I ? "instruction" : "data",
                    code & PGEX_PK ? " prot key" : "",
                    code & PGEX_SGX ? " SGX" : "",
                    code & PGEX_RSV ? "reserved bits in PTE" :
                    code & PGEX_P ? "protection violation" : "page not present");
        }
        printf("instruction pointer = 0x%lx:0x%lx\n",
            frame->tf_cs & 0xffff, frame->tf_rip);
        ss = frame->tf_ss & 0xffff;
        printf("stack pointer = 0x%x:0x%lx\n", ss, frame->tf_rsp);
        printf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
        printf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n",
            softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
        printf(" = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
            softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32,
            softseg.ssd_gran);
        printf("processor eflags = ");
        if (frame->tf_rflags & PSL_T)
                printf("trace trap, ");
        if (frame->tf_rflags & PSL_I)
                printf("interrupt enabled, ");
        if (frame->tf_rflags & PSL_NT)
                printf("nested task, ");
        if (frame->tf_rflags & PSL_RF)
                printf("resume, ");
        printf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
        printf("current process = %d (%s)\n",
            curproc->p_pid, curthread->td_name);

        printf("rdi: %016lx rsi: %016lx rdx: %016lx\n", frame->tf_rdi,
            frame->tf_rsi, frame->tf_rdx);
        printf("rcx: %016lx  r8: %016lx  r9: %016lx\n", frame->tf_rcx,
            frame->tf_r8, frame->tf_r9);
        printf("rax: %016lx rbx: %016lx rbp: %016lx\n", frame->tf_rax,
            frame->tf_rbx, frame->tf_rbp);
        printf("r10: %016lx r11: %016lx r12: %016lx\n", frame->tf_r10,
            frame->tf_r11, frame->tf_r12);
        printf("r13: %016lx r14: %016lx r15: %016lx\n", frame->tf_r13,
            frame->tf_r14, frame->tf_r15);

#ifdef KDB
        if (debugger_on_trap) {
                kdb_why = KDB_WHY_TRAP;
                handled = kdb_trap(type, 0, frame);
                kdb_why = KDB_WHY_UNSET;
                if (handled)
                        return;
        }
#endif
        printf("trap number = %d\n", type);
        panic("%s", type < nitems(trap_msg) ? trap_msg[type] :
            "unknown/reserved trap");
}

#ifdef KDTRACE_HOOKS
/*
 * Invoke a userspace DTrace hook.  The hook pointer is cleared when no
 * userspace probes are enabled, so we must synchronize with DTrace to ensure
 * that a trapping thread is able to call the hook before it is cleared.
 */
static bool
trap_user_dtrace(struct trapframe *frame, int (**hookp)(struct trapframe *))
{
        int (*hook)(struct trapframe *);

        hook = atomic_load_ptr(hookp);
        enable_intr();
        if (hook != NULL)
                return ((hook)(frame) == 0);
        return (false);
}
#endif

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 */
void
dblfault_handler(struct trapframe *frame)
{
        kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);
#ifdef KDTRACE_HOOKS
        if (dtrace_doubletrap_func != NULL)
                (*dtrace_doubletrap_func)();
#endif
        printf("\nFatal double fault\n"
            "rip %#lx rsp %#lx rbp %#lx\n"
            "rax %#lx rdx %#lx rbx %#lx\n"
            "rcx %#lx rsi %#lx rdi %#lx\n"
            "r8 %#lx r9 %#lx r10 %#lx\n"
            "r11 %#lx r12 %#lx r13 %#lx\n"
            "r14 %#lx r15 %#lx rflags %#lx\n"
            "cs %#lx ss %#lx ds %#hx es %#hx fs %#hx gs %#hx\n"
            "fsbase %#lx gsbase %#lx kgsbase %#lx\n",
            frame->tf_rip, frame->tf_rsp, frame->tf_rbp,
            frame->tf_rax, frame->tf_rdx, frame->tf_rbx,
            frame->tf_rcx, frame->tf_rsi, frame->tf_rdi,
            frame->tf_r8, frame->tf_r9, frame->tf_r10,
            frame->tf_r11, frame->tf_r12, frame->tf_r13,
            frame->tf_r14, frame->tf_r15, frame->tf_rflags,
            frame->tf_cs, frame->tf_ss, frame->tf_ds, frame->tf_es,
            frame->tf_fs, frame->tf_gs,
            rdmsr(MSR_FSBASE), rdmsr(MSR_GSBASE), rdmsr(MSR_KGSBASE));
#ifdef SMP
        /* two separate prints in case of a trap on an unmapped page */
        printf("cpuid = %d; ", PCPU_GET(cpuid));
        printf("apic id = %02x\n", PCPU_GET(apic_id));
#endif
        panic("double fault");
}

static int __noinline
cpu_fetch_syscall_args_fallback(struct thread *td, struct syscall_args *sa)
{
        struct proc *p;
        struct trapframe *frame;
        syscallarg_t *argp;
        caddr_t params;
        int reg, regcnt, error;

        p = td->td_proc;
        frame = td->td_frame;
        reg = 0;
        regcnt = NARGREGS;

        if (sa->code == SYS_syscall || sa->code == SYS___syscall) {
                sa->code = frame->tf_rdi;
                reg++;
                regcnt--;
        }

        if (sa->code >= p->p_sysent->sv_size)
                sa->callp = &nosys_sysent;
        else
                sa->callp = &p->p_sysent->sv_table[sa->code];

        KASSERT(sa->callp->sy_narg <= nitems(sa->args),
            ("Too many syscall arguments!"));
        argp = &frame->tf_rdi;
        argp += reg;
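        /*
         * Unconditionally copy NARGREGS register slots; extra slots are
         * harmless and ignored when the syscall takes fewer arguments.
         * Arguments beyond the registers are fetched from the user stack
         * below and overwrite the tail of sa->args.
         */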
        memcpy(sa->args, argp, sizeof(sa->args[0]) * NARGREGS);
        if (sa->callp->sy_narg > regcnt) {
                params = (caddr_t)frame->tf_rsp + sizeof(register_t);
                error = copyin(params, &sa->args[regcnt],
                    (sa->callp->sy_narg - regcnt) * sizeof(sa->args[0]));
                if (__predict_false(error != 0))
                        return (error);
        }

        td->td_retval[0] = 0;
        td->td_retval[1] = frame->tf_rdx;

        return (0);
}

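/*
 * Fast path for fetching syscall arguments: all arguments fit into the
 * trapframe registers.  Indirect syscalls (SYS_syscall, SYS___syscall),
 * out-of-range syscall numbers, and syscalls taking more than NARGREGS
 * register arguments go through the fallback above.
 */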
int
cpu_fetch_syscall_args(struct thread *td)
{
        struct proc *p;
        struct trapframe *frame;
        struct syscall_args *sa;

        p = td->td_proc;
        frame = td->td_frame;
        sa = &td->td_sa;

        sa->code = frame->tf_rax;
        sa->original_code = sa->code;

        if (__predict_false(sa->code == SYS_syscall ||
            sa->code == SYS___syscall ||
            sa->code >= p->p_sysent->sv_size))
                return (cpu_fetch_syscall_args_fallback(td, sa));

        sa->callp = &p->p_sysent->sv_table[sa->code];
        KASSERT(sa->callp->sy_narg <= nitems(sa->args),
            ("Too many syscall arguments!"));

        if (__predict_false(sa->callp->sy_narg > NARGREGS))
                return (cpu_fetch_syscall_args_fallback(td, sa));

        memcpy(sa->args, &frame->tf_rdi, sizeof(sa->args[0]) * NARGREGS);

        td->td_retval[0] = 0;
        td->td_retval[1] = frame->tf_rdx;

        return (0);
}

#include "../../kern/subr_syscall.c"

static void (*syscall_ret_l1d_flush)(void);
int syscall_ret_l1d_flush_mode;

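/*
 * Flush the L1 data cache with the IA32_FLUSH_CMD MSR.  This is only
 * installed as the flush method when the CPU advertises
 * CPUID_STDEXT3_L1D_FLUSH; see amd64_syscall_ret_flush_l1d_recalc().
 */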
static void
flush_l1d_hw(void)
{

        wrmsr(MSR_IA32_FLUSH_CMD, IA32_FLUSH_CMD_L1D);
}

static void __noinline
amd64_syscall_ret_flush_l1d_check(int error)
{
        void (*p)(void);

        if (error != EEXIST && error != EAGAIN && error != EXDEV &&
            error != ENOENT && error != ENOTCONN && error != EINPROGRESS) {
                p = atomic_load_ptr(&syscall_ret_l1d_flush);
                if (p != NULL)
                        p();
        }
}

static void __inline
amd64_syscall_ret_flush_l1d_check_inline(int error)
{

        if (__predict_false(error != 0))
                amd64_syscall_ret_flush_l1d_check(error);
}

void
amd64_syscall_ret_flush_l1d(int error)
{

        amd64_syscall_ret_flush_l1d_check_inline(error);
}

void
amd64_syscall_ret_flush_l1d_recalc(void)
{
        bool l1d_hw;

        l1d_hw = (cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) != 0;
again:
        switch (syscall_ret_l1d_flush_mode) {
        case 0:
                syscall_ret_l1d_flush = NULL;
                break;
        case 1:
                syscall_ret_l1d_flush = l1d_hw ? flush_l1d_hw :
                    flush_l1d_sw_abi;
                break;
        case 2:
                syscall_ret_l1d_flush = l1d_hw ? flush_l1d_hw : NULL;
                break;
        case 3:
                syscall_ret_l1d_flush = flush_l1d_sw_abi;
                break;
        default:
                syscall_ret_l1d_flush_mode = 1;
                goto again;
        }
}

static int
machdep_syscall_ret_flush_l1d(SYSCTL_HANDLER_ARGS)
{
        int error, val;

        val = syscall_ret_l1d_flush_mode;
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        syscall_ret_l1d_flush_mode = val;
        amd64_syscall_ret_flush_l1d_recalc();
        return (0);
}
SYSCTL_PROC(_machdep, OID_AUTO, syscall_ret_flush_l1d, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    machdep_syscall_ret_flush_l1d, "I",
    "Flush L1D on syscall return with error (0 - off, 1 - on, "
    "2 - use hw only, 3 - use sw only)");

/*
 * System call handler for native binaries.  The trap frame is already
 * set up by the assembler trampoline and a pointer to it is saved in
 * td_frame.
 */
void
amd64_syscall(struct thread *td, int traced)
{
        ksiginfo_t ksi;

        kmsan_mark(td->td_frame, sizeof(*td->td_frame), KMSAN_STATE_INITED);

        KASSERT(TRAPF_USERMODE(td->td_frame),
            ("%s: not from user mode", __func__));

        syscallenter(td);

        /*
         * Traced syscall.
         */
        if (__predict_false(traced)) {
                td->td_frame->tf_rflags &= ~PSL_T;
                ksiginfo_init_trap(&ksi);
                ksi.ksi_signo = SIGTRAP;
                ksi.ksi_code = TRAP_TRACE;
                ksi.ksi_addr = (void *)td->td_frame->tf_rip;
                trapsignal(td, &ksi);
        }

        KASSERT(PCB_USER_FPU(td->td_pcb),
            ("System call %s returning with kernel FPU ctx leaked",
            syscallname(td->td_proc, td->td_sa.code)));
        KASSERT(td->td_pcb->pcb_save == get_pcb_user_save_td(td),
            ("System call %s returning with mangled pcb_save",
            syscallname(td->td_proc, td->td_sa.code)));
        KASSERT(pmap_not_in_di(),
            ("System call %s returning with leaked invl_gen %lu",
            syscallname(td->td_proc, td->td_sa.code),
            td->td_md.md_invl_gen.gen));

        syscallret(td);

        /*
         * If the user-supplied value of %rip is not a canonical
         * address, then some CPUs will trigger a ring 0 #GP during
         * the sysret instruction.  However, the fault handler would
         * execute in ring 0 with the user's %gs and %rsp which would
         * not be safe.  Instead, use the full return path which
         * catches the problem safely.
         */
        if (__predict_false(td->td_frame->tf_rip >= (la57 ?
            VM_MAXUSER_ADDRESS_LA57 : VM_MAXUSER_ADDRESS_LA48)))
                set_pcb_flags(td->td_pcb, PCB_FULL_IRET);

        amd64_syscall_ret_flush_l1d_check_inline(td->td_errno);
}