xref: /freebsd/sys/x86/x86/cpu_machdep.c (revision 53b70c86)
1 /*-
2  * Copyright (c) 2003 Peter Wemm.
3  * Copyright (c) 1992 Terrence R. Lambert.
4  * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * William Jolitz.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
39  */
40 
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43 
44 #include "opt_acpi.h"
45 #include "opt_atpic.h"
46 #include "opt_cpu.h"
47 #include "opt_ddb.h"
48 #include "opt_inet.h"
49 #include "opt_isa.h"
50 #include "opt_kdb.h"
51 #include "opt_kstack_pages.h"
52 #include "opt_maxmem.h"
53 #include "opt_mp_watchdog.h"
54 #include "opt_platform.h"
55 #ifdef __i386__
56 #include "opt_apic.h"
57 #endif
58 
59 #include <sys/param.h>
60 #include <sys/proc.h>
61 #include <sys/systm.h>
62 #include <sys/bus.h>
63 #include <sys/cpu.h>
64 #include <sys/domainset.h>
65 #include <sys/kdb.h>
66 #include <sys/kernel.h>
67 #include <sys/ktr.h>
68 #include <sys/lock.h>
69 #include <sys/malloc.h>
70 #include <sys/mutex.h>
71 #include <sys/pcpu.h>
72 #include <sys/rwlock.h>
73 #include <sys/sched.h>
74 #include <sys/smp.h>
75 #include <sys/sysctl.h>
76 
77 #include <machine/clock.h>
78 #include <machine/cpu.h>
79 #include <machine/cpufunc.h>
80 #include <machine/cputypes.h>
81 #include <machine/specialreg.h>
82 #include <machine/md_var.h>
83 #include <machine/mp_watchdog.h>
84 #include <machine/tss.h>
85 #ifdef SMP
86 #include <machine/smp.h>
87 #endif
88 #ifdef CPU_ELAN
89 #include <machine/elan_mmcr.h>
90 #endif
91 #include <x86/acpica_machdep.h>
92 #include <x86/ifunc.h>
93 
94 #include <vm/vm.h>
95 #include <vm/vm_extern.h>
96 #include <vm/vm_kern.h>
97 #include <vm/vm_page.h>
98 #include <vm/vm_map.h>
99 #include <vm/vm_object.h>
100 #include <vm/vm_pager.h>
101 #include <vm/vm_param.h>
102 
103 #include <isa/isareg.h>
104 
105 #include <contrib/dev/acpica/include/acpi.h>
106 
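/*
 * Idle state of a CPU, kept in its pcpu monitorbuf and consulted by
 * cpu_idle_wakeup(): STATE_SLEEPING means the CPU is halted (or about to
 * halt) and needs an interrupt to wake it; STATE_MWAIT means it is
 * monitoring this state word, so storing STATE_RUNNING to it is enough;
 * STATE_RUNNING means no wakeup action is needed.
 */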
107 #define	STATE_RUNNING	0x0
108 #define	STATE_MWAIT	0x1
109 #define	STATE_SLEEPING	0x2
110 
111 #ifdef SMP
112 static u_int	cpu_reset_proxyid;
113 static volatile u_int	cpu_reset_proxy_active;
114 #endif
115 
116 char bootmethod[16];
117 SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
118     "System firmware boot method");
119 
120 struct msr_op_arg {
121 	u_int msr;
122 	int op;
123 	uint64_t arg1;
124 	uint64_t *res;
125 };
126 
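/*
 * Perform a single MSR operation, described by the msr_op_arg, on the CPU
 * this function runs on.  x86_msr_op() below invokes it directly, via
 * sched_bind(), or from an SMP rendezvous.
 */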
127 static void
128 x86_msr_op_one(void *argp)
129 {
130 	struct msr_op_arg *a;
131 	uint64_t v;
132 
133 	a = argp;
134 	switch (a->op) {
135 	case MSR_OP_ANDNOT:
136 		v = rdmsr(a->msr);
137 		v &= ~a->arg1;
138 		wrmsr(a->msr, v);
139 		break;
140 	case MSR_OP_OR:
141 		v = rdmsr(a->msr);
142 		v |= a->arg1;
143 		wrmsr(a->msr, v);
144 		break;
145 	case MSR_OP_WRITE:
146 		wrmsr(a->msr, a->arg1);
147 		break;
148 	case MSR_OP_READ:
149 		v = rdmsr(a->msr);
150 		*a->res = v;
151 		break;
152 	}
153 }
154 
155 #define	MSR_OP_EXMODE_MASK	0xf0000000
156 #define	MSR_OP_OP_MASK		0x000000ff
157 #define	MSR_OP_GET_CPUID(x)	(((x) & ~MSR_OP_EXMODE_MASK) >> 8)
158 
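/*
 * Dispatch an MSR operation according to the encoding of 'op': the low byte
 * (MSR_OP_OP_MASK) selects the operation, the top nibble
 * (MSR_OP_EXMODE_MASK) selects where it runs (locally, on one CPU, or on
 * all CPUs, via sched_bind() or rendezvous), and for the *_ONE modes the
 * target CPU id is encoded above the low byte and recovered with
 * MSR_OP_GET_CPUID().  For example, hw_ssb_set() below enables SSBD on
 * every CPU with:
 *	x86_msr_op(MSR_IA32_SPEC_CTRL, MSR_OP_OR | MSR_OP_SCHED_ALL,
 *	    IA32_SPEC_CTRL_SSBD, NULL);
 */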
159 void
160 x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res)
161 {
162 	struct thread *td;
163 	struct msr_op_arg a;
164 	cpuset_t set;
165 	u_int exmode;
166 	int bound_cpu, cpu, i, is_bound;
167 
168 	a.op = op & MSR_OP_OP_MASK;
169 	MPASS(a.op == MSR_OP_ANDNOT || a.op == MSR_OP_OR ||
170 	    a.op == MSR_OP_WRITE || a.op == MSR_OP_READ);
171 	exmode = op & MSR_OP_EXMODE_MASK;
172 	MPASS(exmode == MSR_OP_LOCAL || exmode == MSR_OP_SCHED_ALL ||
173 	    exmode == MSR_OP_SCHED_ONE || exmode == MSR_OP_RENDEZVOUS_ALL ||
174 	    exmode == MSR_OP_RENDEZVOUS_ONE);
175 	a.msr = msr;
176 	a.arg1 = arg1;
177 	a.res = res;
178 	switch (exmode) {
179 	case MSR_OP_LOCAL:
180 		x86_msr_op_one(&a);
181 		break;
182 	case MSR_OP_SCHED_ALL:
183 		td = curthread;
184 		thread_lock(td);
185 		is_bound = sched_is_bound(td);
186 		bound_cpu = td->td_oncpu;
187 		CPU_FOREACH(i) {
188 			sched_bind(td, i);
189 			x86_msr_op_one(&a);
190 		}
191 		if (is_bound)
192 			sched_bind(td, bound_cpu);
193 		else
194 			sched_unbind(td);
195 		thread_unlock(td);
196 		break;
197 	case MSR_OP_SCHED_ONE:
198 		td = curthread;
199 		cpu = MSR_OP_GET_CPUID(op);
200 		thread_lock(td);
201 		is_bound = sched_is_bound(td);
202 		bound_cpu = td->td_oncpu;
203 		if (!is_bound || bound_cpu != cpu)
204 			sched_bind(td, cpu);
205 		x86_msr_op_one(&a);
206 		if (is_bound) {
207 			if (bound_cpu != cpu)
208 				sched_bind(td, bound_cpu);
209 		} else {
210 			sched_unbind(td);
211 		}
212 		thread_unlock(td);
213 		break;
214 	case MSR_OP_RENDEZVOUS_ALL:
215 		smp_rendezvous(smp_no_rendezvous_barrier, x86_msr_op_one,
216 		    smp_no_rendezvous_barrier, &a);
217 		break;
218 	case MSR_OP_RENDEZVOUS_ONE:
219 		cpu = MSR_OP_GET_CPUID(op);
220 		CPU_SETOF(cpu, &set);
221 		smp_rendezvous_cpus(set, smp_no_rendezvous_barrier,
222 		    x86_msr_op_one, smp_no_rendezvous_barrier, &a);
223 		break;
224 	}
225 }
226 
227 /*
228  * Automatically initialized according to CPU errata in cpu_idle_tun() below.
229  */
230 bool mwait_cpustop_broken = false;
231 SYSCTL_BOOL(_machdep, OID_AUTO, mwait_cpustop_broken, CTLFLAG_RDTUN,
232     &mwait_cpustop_broken, 0,
233     "Cannot reliably wake MONITOR/MWAIT cpus without interrupts");
234 
235 /*
236  * Flush the D-cache for non-DMA I/O so that the I-cache can
237  * be made coherent later.
238  */
239 void
240 cpu_flush_dcache(void *ptr, size_t len)
241 {
242 	/* Not applicable */
243 }
244 
245 void
246 acpi_cpu_c1(void)
247 {
248 
249 	__asm __volatile("sti; hlt");
250 }
251 
252 /*
253  * Use mwait to pause execution while waiting for an interrupt or
254  * another thread to signal that there is more work.
255  *
256  * NOTE: Interrupts will cause a wakeup; however, this function does
257  * not enable interrupt handling.  The caller is responsible for
258  * enabling interrupts.
259  */
260 void
261 acpi_cpu_idle_mwait(uint32_t mwait_hint)
262 {
263 	int *state;
264 	uint64_t v;
265 
266 	/*
267 	 * A comment in a Linux patch claims that 'CPUs run faster with
268 	 * speculation protection disabled. All CPU threads in a core
269 	 * must disable speculation protection for it to be
270 	 * disabled. Disable it while we are idle so the other
271 	 * hyperthread can run fast.'
272 	 *
273 	 * XXXKIB.  Software coordination mode should be supported,
274 	 * but all Intel CPUs provide hardware coordination.
275 	 */
276 
277 	state = &PCPU_PTR(monitorbuf)->idle_state;
278 	KASSERT(atomic_load_int(state) == STATE_SLEEPING,
279 	    ("cpu_mwait_cx: wrong monitorbuf state"));
280 	atomic_store_int(state, STATE_MWAIT);
281 	if (PCPU_GET(ibpb_set) || hw_ssb_active) {
282 		v = rdmsr(MSR_IA32_SPEC_CTRL);
283 		wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
284 		    IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
285 	} else {
286 		v = 0;
287 	}
288 	cpu_monitor(state, 0, 0);
289 	if (atomic_load_int(state) == STATE_MWAIT)
290 		cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
291 
292 	/*
293 	 * SSB cannot be disabled while we sleep, or rather, if it was
294 	 * disabled, the sysctl thread will bind to our cpu to tweak
295 	 * disabled, the sysctl thread will bind to our CPU to tweak
296 	 * the MSR.
297 	if (v != 0)
298 		wrmsr(MSR_IA32_SPEC_CTRL, v);
299 
300 	/*
301 	 * We should exit on any event that interrupts mwait, because
302 	 * that event might be a wanted interrupt.
303 	 */
304 	atomic_store_int(state, STATE_RUNNING);
305 }
306 
307 /* Get current clock frequency for the given cpu id. */
308 int
309 cpu_est_clockrate(int cpu_id, uint64_t *rate)
310 {
311 	uint64_t tsc1, tsc2;
312 	uint64_t acnt, mcnt, perf;
313 	register_t reg;
314 
315 	if (pcpu_find(cpu_id) == NULL || rate == NULL)
316 		return (EINVAL);
317 #ifdef __i386__
318 	if ((cpu_feature & CPUID_TSC) == 0)
319 		return (EOPNOTSUPP);
320 #endif
321 
322 	/*
323 	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
324 	 * DELAY(9) based logic fails.
325 	 */
326 	if (tsc_is_invariant && !tsc_perf_stat)
327 		return (EOPNOTSUPP);
328 
329 #ifdef SMP
330 	if (smp_cpus > 1) {
331 		/* Schedule ourselves on the indicated cpu. */
332 		thread_lock(curthread);
333 		sched_bind(curthread, cpu_id);
334 		thread_unlock(curthread);
335 	}
336 #endif
337 
338 	/* Calibrate by measuring a short delay. */
339 	reg = intr_disable();
340 	if (tsc_is_invariant) {
341 		wrmsr(MSR_MPERF, 0);
342 		wrmsr(MSR_APERF, 0);
343 		tsc1 = rdtsc();
344 		DELAY(1000);
345 		mcnt = rdmsr(MSR_MPERF);
346 		acnt = rdmsr(MSR_APERF);
347 		tsc2 = rdtsc();
348 		intr_restore(reg);
349 		perf = 1000 * acnt / mcnt;
350 		*rate = (tsc2 - tsc1) * perf;
351 	} else {
352 		tsc1 = rdtsc();
353 		DELAY(1000);
354 		tsc2 = rdtsc();
355 		intr_restore(reg);
356 		*rate = (tsc2 - tsc1) * 1000;
357 	}
358 
359 #ifdef SMP
360 	if (smp_cpus > 1) {
361 		thread_lock(curthread);
362 		sched_unbind(curthread);
363 		thread_unlock(curthread);
364 	}
365 #endif
366 
367 	return (0);
368 }
369 
370 /*
371  * Shutdown the CPU as much as possible
372  */
373 void
374 cpu_halt(void)
375 {
376 	for (;;)
377 		halt();
378 }
379 
380 static void
381 cpu_reset_real(void)
382 {
383 	struct region_descriptor null_idt;
384 	int b;
385 
386 	disable_intr();
387 #ifdef CPU_ELAN
388 	if (elan_mmcr != NULL)
389 		elan_mmcr->RESCFG = 1;
390 #endif
391 #ifdef __i386__
392 	if (cpu == CPU_GEODE1100) {
393 		/* Attempt Geode's own reset */
394 		outl(0xcf8, 0x80009044ul);
395 		outl(0xcfc, 0xf);
396 	}
397 #endif
398 #if !defined(BROKEN_KEYBOARD_RESET)
399 	/*
400 	 * Attempt to do a CPU reset via the keyboard controller;
401 	 * do not turn off GateA20, as any machine that fails
402 	 * to do the reset here would then end up in no man's land.
403 	 */
404 	outb(IO_KBD + 4, 0xFE);
405 	DELAY(500000);	/* wait 0.5 sec to see if that did it */
406 #endif
407 
408 	/*
409 	 * Attempt to force a reset via the Reset Control register at
410 	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
411 	 * transitions from 0 to 1.  Bit 1 selects the type of reset
412 	 * to attempt: 0 selects a "soft" reset, and 1 selects a
413 	 * "hard" reset.  We try a "hard" reset.  The first write sets
414 	 * bit 1 to select a "hard" reset and clears bit 2.  The
415 	 * second write forces a 0 -> 1 transition in bit 2 to trigger
416 	 * a reset.
417 	 */
418 	outb(0xcf9, 0x2);
419 	outb(0xcf9, 0x6);
420 	DELAY(500000);  /* wait 0.5 sec to see if that did it */
421 
422 	/*
423 	 * Attempt to force a reset via the Fast A20 and Init register
424 	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
425 	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
426 	 * preserve bit 1 while setting bit 0.  We also must clear bit
427 	 * 0 before setting it if it isn't already clear.
428 	 */
429 	b = inb(0x92);
430 	if (b != 0xff) {
431 		if ((b & 0x1) != 0)
432 			outb(0x92, b & 0xfe);
433 		outb(0x92, b | 0x1);
434 		DELAY(500000);  /* wait 0.5 sec to see if that did it */
435 	}
436 
437 	printf("No known reset method worked, attempting CPU shutdown\n");
438 	DELAY(1000000); /* wait 1 sec for printf to complete */
439 
440 	/* Wipe the IDT. */
441 	null_idt.rd_limit = 0;
442 	null_idt.rd_base = 0;
443 	lidt(&null_idt);
444 
445 	/* "good night, sweet prince .... <THUNK!>" */
446 	breakpoint();
447 
448 	/* NOTREACHED */
449 	while(1);
450 }
451 
452 #ifdef SMP
453 static void
454 cpu_reset_proxy(void)
455 {
456 
457 	cpu_reset_proxy_active = 1;
458 	while (cpu_reset_proxy_active == 1)
459 		ia32_pause(); /* Wait for other cpu to see that we've started */
460 
461 	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
462 	DELAY(1000000);
463 	cpu_reset_real();
464 }
465 #endif
466 
467 void
468 cpu_reset(void)
469 {
470 #ifdef SMP
471 	struct monitorbuf *mb;
472 	cpuset_t map;
473 	u_int cnt;
474 
475 	if (smp_started) {
476 		map = all_cpus;
477 		CPU_CLR(PCPU_GET(cpuid), &map);
478 		CPU_ANDNOT(&map, &stopped_cpus);
479 		if (!CPU_EMPTY(&map)) {
480 			printf("cpu_reset: Stopping other CPUs\n");
481 			stop_cpus(map);
482 		}
483 
484 		if (PCPU_GET(cpuid) != 0) {
485 			cpu_reset_proxyid = PCPU_GET(cpuid);
486 			cpustop_restartfunc = cpu_reset_proxy;
487 			cpu_reset_proxy_active = 0;
488 			printf("cpu_reset: Restarting BSP\n");
489 
490 			/* Restart CPU #0. */
491 			CPU_SETOF(0, &started_cpus);
492 			mb = &pcpu_find(0)->pc_monitorbuf;
493 			atomic_store_int(&mb->stop_state,
494 			    MONITOR_STOPSTATE_RUNNING);
495 
496 			cnt = 0;
497 			while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
498 				ia32_pause();
499 				cnt++;	/* Wait for BSP to announce restart */
500 			}
501 			if (cpu_reset_proxy_active == 0) {
502 				printf("cpu_reset: Failed to restart BSP\n");
503 			} else {
504 				cpu_reset_proxy_active = 2;
505 				while (1)
506 					ia32_pause();
507 				/* NOTREACHED */
508 			}
509 		}
510 
511 		DELAY(1000000);
512 	}
513 #endif
514 	cpu_reset_real();
515 	/* NOTREACHED */
516 }
517 
518 bool
519 cpu_mwait_usable(void)
520 {
521 
522 	return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags &
523 	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) ==
524 	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)));
525 }
526 
527 void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */
528 
529 int cpu_amdc1e_bug = 0;			/* AMD C1E APIC workaround required. */
530 
531 static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
532 SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
533     0, "Use MONITOR/MWAIT for short idle");
534 
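/*
 * Default idle method (cpu_idle_fn): defer to the ACPI idle hook when one
 * is installed, otherwise fall back to C1 via "sti; hlt".
 */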
535 static void
536 cpu_idle_acpi(sbintime_t sbt)
537 {
538 	int *state;
539 
540 	state = &PCPU_PTR(monitorbuf)->idle_state;
541 	atomic_store_int(state, STATE_SLEEPING);
542 
543 	/* See comments in cpu_idle_hlt(). */
544 	disable_intr();
545 	if (sched_runnable())
546 		enable_intr();
547 	else if (cpu_idle_hook)
548 		cpu_idle_hook(sbt);
549 	else
550 		acpi_cpu_c1();
551 	atomic_store_int(state, STATE_RUNNING);
552 }
553 
554 static void
555 cpu_idle_hlt(sbintime_t sbt)
556 {
557 	int *state;
558 
559 	state = &PCPU_PTR(monitorbuf)->idle_state;
560 	atomic_store_int(state, STATE_SLEEPING);
561 
562 	/*
563 	 * Since we may be in a critical section from cpu_idle(), if
564 	 * an interrupt fires during that critical section we may have
565 	 * a pending preemption.  If the CPU halts, then that thread
566 	 * may not execute until a later interrupt awakens the CPU.
567 	 * To handle this race, check for a runnable thread after
568 	 * disabling interrupts and immediately return if one is
569 	 * found.  Also, we must absolutely guarantee that hlt is
570 	 * the next instruction after sti.  This ensures that any
571 	 * interrupt that fires after the call to disable_intr() will
572 	 * immediately awaken the CPU from hlt.  Finally, note that
573 	 * on x86 this works because interrupts are enabled only after
574 	 * the instruction following sti executes, while IF is set to 1
575 	 * immediately, allowing the hlt instruction to acknowledge the
576 	 * interrupt.
577 	 */
578 	disable_intr();
579 	if (sched_runnable())
580 		enable_intr();
581 	else
582 		acpi_cpu_c1();
583 	atomic_store_int(state, STATE_RUNNING);
584 }
585 
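/*
 * Idle using MONITOR/MWAIT armed on the per-CPU idle_state word, so that
 * cpu_idle_wakeup() can wake this CPU with a plain store instead of an IPI.
 */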
586 static void
587 cpu_idle_mwait(sbintime_t sbt)
588 {
589 	int *state;
590 
591 	state = &PCPU_PTR(monitorbuf)->idle_state;
592 	atomic_store_int(state, STATE_MWAIT);
593 
594 	/* See comments in cpu_idle_hlt(). */
595 	disable_intr();
596 	if (sched_runnable()) {
597 		atomic_store_int(state, STATE_RUNNING);
598 		enable_intr();
599 		return;
600 	}
601 
602 	cpu_monitor(state, 0, 0);
603 	if (atomic_load_int(state) == STATE_MWAIT)
604 		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
605 	else
606 		enable_intr();
607 	atomic_store_int(state, STATE_RUNNING);
608 }
609 
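/*
 * Busy-wait idle method: poll sched_runnable() a bounded number of times,
 * issuing cpu_spinwait() between checks, without ever halting the CPU.
 */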
610 static void
611 cpu_idle_spin(sbintime_t sbt)
612 {
613 	int *state;
614 	int i;
615 
616 	state = &PCPU_PTR(monitorbuf)->idle_state;
617 	atomic_store_int(state, STATE_RUNNING);
618 
619 	/*
620 	 * The sched_runnable() call is racy, but since it is made in a
621 	 * loop, missing it once has little impact, if any
622 	 * (and it is much better than not checking at all).
623 	 */
624 	for (i = 0; i < 1000; i++) {
625 		if (sched_runnable())
626 			return;
627 		cpu_spinwait();
628 	}
629 }
630 
631 void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;
632 
633 void
634 cpu_idle(int busy)
635 {
636 	uint64_t msr;
637 	sbintime_t sbt = -1;
638 
639 	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
640 	    busy, curcpu);
641 #ifdef MP_WATCHDOG
642 	ap_watchdog(PCPU_GET(cpuid));
643 #endif
644 
645 	/* If we are busy - try to use fast methods. */
646 	if (busy) {
647 		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
648 			cpu_idle_mwait(busy);
649 			goto out;
650 		}
651 	}
652 
653 	/* If we have time - switch timers into idle mode. */
654 	if (!busy) {
655 		critical_enter();
656 		sbt = cpu_idleclock();
657 	}
658 
659 	/* Apply AMD APIC timer C1E workaround. */
660 	if (cpu_amdc1e_bug && cpu_disable_c3_sleep) {
661 		msr = rdmsr(MSR_AMDK8_IPM);
662 		if ((msr & (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)) != 0)
663 			wrmsr(MSR_AMDK8_IPM, msr & ~(AMDK8_SMIONCMPHALT |
664 			    AMDK8_C1EONCMPHALT));
665 	}
666 
667 	/* Call main idle method. */
668 	cpu_idle_fn(sbt);
669 
670 	/* Switch timers back into active mode. */
671 	if (!busy) {
672 		cpu_activeclock();
673 		critical_exit();
674 	}
675 out:
676 	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
677 	    busy, curcpu);
678 }
679 
680 static int cpu_idle_apl31_workaround;
681 SYSCTL_INT(_machdep, OID_AUTO, idle_apl31, CTLFLAG_RW,
682     &cpu_idle_apl31_workaround, 0,
683     "Apollo Lake APL31 MWAIT bug workaround");
684 
685 int
686 cpu_idle_wakeup(int cpu)
687 {
688 	struct monitorbuf *mb;
689 	int *state;
690 
691 	mb = &pcpu_find(cpu)->pc_monitorbuf;
692 	state = &mb->idle_state;
693 	switch (atomic_load_int(state)) {
694 	case STATE_SLEEPING:
695 		return (0);
696 	case STATE_MWAIT:
697 		atomic_store_int(state, STATE_RUNNING);
698 		return (cpu_idle_apl31_workaround ? 0 : 1);
699 	case STATE_RUNNING:
700 		return (1);
701 	default:
702 		panic("bad monitor state");
703 		return (1);
704 	}
705 }
706 
707 /*
708  * Ordered by speed/power consumption.
709  */
710 static struct {
711 	void	*id_fn;
712 	char	*id_name;
713 	int	id_cpuid2_flag;
714 } idle_tbl[] = {
715 	{ .id_fn = cpu_idle_spin, .id_name = "spin" },
716 	{ .id_fn = cpu_idle_mwait, .id_name = "mwait",
717 	    .id_cpuid2_flag = CPUID2_MON },
718 	{ .id_fn = cpu_idle_hlt, .id_name = "hlt" },
719 	{ .id_fn = cpu_idle_acpi, .id_name = "acpi" },
720 };
721 
722 static int
723 idle_sysctl_available(SYSCTL_HANDLER_ARGS)
724 {
725 	char *avail, *p;
726 	int error;
727 	int i;
728 
729 	avail = malloc(256, M_TEMP, M_WAITOK);
730 	p = avail;
731 	for (i = 0; i < nitems(idle_tbl); i++) {
732 		if (idle_tbl[i].id_cpuid2_flag != 0 &&
733 		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
734 			continue;
735 		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
736 		    cpu_idle_hook == NULL)
737 			continue;
738 		p += sprintf(p, "%s%s", p != avail ? ", " : "",
739 		    idle_tbl[i].id_name);
740 	}
741 	error = sysctl_handle_string(oidp, avail, 0, req);
742 	free(avail, M_TEMP);
743 	return (error);
744 }
745 
746 SYSCTL_PROC(_machdep, OID_AUTO, idle_available,
747     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
748     0, 0, idle_sysctl_available, "A",
749     "list of available idle functions");
750 
751 static bool
752 cpu_idle_selector(const char *new_idle_name)
753 {
754 	int i;
755 
756 	for (i = 0; i < nitems(idle_tbl); i++) {
757 		if (idle_tbl[i].id_cpuid2_flag != 0 &&
758 		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
759 			continue;
760 		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
761 		    cpu_idle_hook == NULL)
762 			continue;
763 		if (strcmp(idle_tbl[i].id_name, new_idle_name))
764 			continue;
765 		cpu_idle_fn = idle_tbl[i].id_fn;
766 		if (bootverbose)
767 			printf("CPU idle set to %s\n", idle_tbl[i].id_name);
768 		return (true);
769 	}
770 	return (false);
771 }
772 
773 static int
774 cpu_idle_sysctl(SYSCTL_HANDLER_ARGS)
775 {
776 	char buf[16], *p;
777 	int error, i;
778 
779 	p = "unknown";
780 	for (i = 0; i < nitems(idle_tbl); i++) {
781 		if (idle_tbl[i].id_fn == cpu_idle_fn) {
782 			p = idle_tbl[i].id_name;
783 			break;
784 		}
785 	}
786 	strncpy(buf, p, sizeof(buf));
787 	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
788 	if (error != 0 || req->newptr == NULL)
789 		return (error);
790 	return (cpu_idle_selector(buf) ? 0 : EINVAL);
791 }
792 
793 SYSCTL_PROC(_machdep, OID_AUTO, idle,
794     CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
795     0, 0, cpu_idle_sysctl, "A",
796     "currently selected idle function");
797 
798 static void
799 cpu_idle_tun(void *unused __unused)
800 {
801 	char tunvar[16];
802 
803 	if (TUNABLE_STR_FETCH("machdep.idle", tunvar, sizeof(tunvar)))
804 		cpu_idle_selector(tunvar);
805 	else if (cpu_vendor_id == CPU_VENDOR_AMD &&
806 	    CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1) {
807 		/* Ryzen errata 1057, 1109. */
808 		cpu_idle_selector("hlt");
809 		idle_mwait = 0;
810 		mwait_cpustop_broken = true;
811 	}
812 
813 	if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_id == 0x506c9) {
814 		/*
815 		 * Apollo Lake errata APL31 (public errata APL30).
816 		 * Stores to the armed address range may not trigger
817 		 * MWAIT to resume execution.  The OS needs to use
818 		 * interrupts to wake processors from MWAIT-induced
819 		 * sleep states.
820 		 */
821 		cpu_idle_apl31_workaround = 1;
822 		mwait_cpustop_broken = true;
823 	}
824 	TUNABLE_INT_FETCH("machdep.idle_apl31", &cpu_idle_apl31_workaround);
825 }
826 SYSINIT(cpu_idle_tun, SI_SUB_CPU, SI_ORDER_MIDDLE, cpu_idle_tun, NULL);
827 
828 static int panic_on_nmi = 0xff;
829 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
830     &panic_on_nmi, 0,
831     "Panic on NMI: 1 = H/W failure; 2 = unknown; 0xff = all");
832 int nmi_is_broadcast = 1;
833 SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN,
834     &nmi_is_broadcast, 0,
835     "Chipset NMI is broadcast");
836 int (*apei_nmi)(void);
837 
838 void
839 nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame)
840 {
841 	bool claimed = false;
842 
843 #ifdef DEV_ISA
844 	/* machine/parity/power fail/"kitchen sink" faults */
845 	if (isa_nmi(frame->tf_err)) {
846 		claimed = true;
847 		if ((panic_on_nmi & 1) != 0)
848 			panic("NMI indicates hardware failure");
849 	}
850 #endif /* DEV_ISA */
851 
852 	/* ACPI Platform Error Interfaces callback. */
853 	if (apei_nmi != NULL && (*apei_nmi)())
854 		claimed = true;
855 
856 	/*
857 	 * NMIs can be useful for debugging.  They can be hooked up to a
858 	 * pushbutton, usually on an ISA, PCI, or PCIe card.  They can also be
859 	 * generated by an IPMI BMC, either manually or in response to a
860 	 * watchdog timeout.  For example, see the "power diag" command in
861 	 * ports/sysutils/ipmitool.  They can also be generated by a
862 	 * hypervisor; see "bhyvectl --inject-nmi".
863 	 */
864 
865 #ifdef KDB
866 	if (!claimed && (panic_on_nmi & 2) != 0) {
867 		if (debugger_on_panic) {
868 			printf("NMI/cpu%d ... going to debugger\n", cpu);
869 			claimed = kdb_trap(type, 0, frame);
870 		}
871 	}
872 #endif /* KDB */
873 
874 	if (!claimed && panic_on_nmi != 0)
875 		panic("NMI");
876 }
877 
878 void
879 nmi_handle_intr(u_int type, struct trapframe *frame)
880 {
881 
882 #ifdef SMP
883 	if (nmi_is_broadcast) {
884 		nmi_call_kdb_smp(type, frame);
885 		return;
886 	}
887 #endif
888 	nmi_call_kdb(PCPU_GET(cpuid), type, frame);
889 }
890 
891 static int hw_ibrs_active;
892 int hw_ibrs_ibpb_active;
893 int hw_ibrs_disable = 1;
894 
895 SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
896     "Indirect Branch Restricted Speculation active");
897 
898 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ibrs,
899     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
900     "Indirect Branch Restricted Speculation active");
901 
902 SYSCTL_INT(_machdep_mitigations_ibrs, OID_AUTO, active, CTLFLAG_RD,
903     &hw_ibrs_active, 0, "Indirect Branch Restricted Speculation active");
904 
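/*
 * Recalculate the IBRS mitigation state.  On CPUs reporting IBRS_ALL, the
 * IBRS bit in IA32_SPEC_CTRL is set or cleared directly, either locally or
 * on all CPUs via rendezvous; otherwise the mitigation falls back to IBPB
 * barriers, provided the CPU advertises IBPB support.
 */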
905 void
906 hw_ibrs_recalculate(bool for_all_cpus)
907 {
908 	if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
909 		x86_msr_op(MSR_IA32_SPEC_CTRL, (for_all_cpus ?
910 		    MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL) |
911 		    (hw_ibrs_disable != 0 ? MSR_OP_ANDNOT : MSR_OP_OR),
912 		    IA32_SPEC_CTRL_IBRS, NULL);
913 		hw_ibrs_active = hw_ibrs_disable == 0;
914 		hw_ibrs_ibpb_active = 0;
915 	} else {
916 		hw_ibrs_active = hw_ibrs_ibpb_active = (cpu_stdext_feature3 &
917 		    CPUID_STDEXT3_IBPB) != 0 && !hw_ibrs_disable;
918 	}
919 }
920 
921 static int
922 hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)
923 {
924 	int error, val;
925 
926 	val = hw_ibrs_disable;
927 	error = sysctl_handle_int(oidp, &val, 0, req);
928 	if (error != 0 || req->newptr == NULL)
929 		return (error);
930 	hw_ibrs_disable = val != 0;
931 	hw_ibrs_recalculate(true);
932 	return (0);
933 }
934 SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
935     CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
936     "Disable Indirect Branch Restricted Speculation");
937 
938 SYSCTL_PROC(_machdep_mitigations_ibrs, OID_AUTO, disable, CTLTYPE_INT |
939     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
940     hw_ibrs_disable_handler, "I",
941     "Disable Indirect Branch Restricted Speculation");
942 
943 int hw_ssb_active;
944 int hw_ssb_disable;
945 
946 SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
947     &hw_ssb_active, 0,
948     "Speculative Store Bypass Disable active");
949 
950 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ssb,
951     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
952     "Speculative Store Bypass Disable active");
953 
954 SYSCTL_INT(_machdep_mitigations_ssb, OID_AUTO, active, CTLFLAG_RD,
955     &hw_ssb_active, 0, "Speculative Store Bypass Disable active");
956 
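/*
 * Set or clear IA32_SPEC_CTRL_SSBD, either on the local CPU only or on
 * every CPU by binding the current thread to each one in turn.
 */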
957 static void
958 hw_ssb_set(bool enable, bool for_all_cpus)
959 {
960 
961 	if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
962 		hw_ssb_active = 0;
963 		return;
964 	}
965 	hw_ssb_active = enable;
966 	x86_msr_op(MSR_IA32_SPEC_CTRL,
967 	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
968 	    (for_all_cpus ? MSR_OP_SCHED_ALL : MSR_OP_LOCAL),
969 	    IA32_SPEC_CTRL_SSBD, NULL);
970 }
971 
972 void
973 hw_ssb_recalculate(bool all_cpus)
974 {
975 
976 	switch (hw_ssb_disable) {
977 	default:
978 		hw_ssb_disable = 0;
979 		/* FALLTHROUGH */
980 	case 0: /* off */
981 		hw_ssb_set(false, all_cpus);
982 		break;
983 	case 1: /* on */
984 		hw_ssb_set(true, all_cpus);
985 		break;
986 	case 2: /* auto */
987 		hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSB_NO) != 0 ?
988 		    false : true, all_cpus);
989 		break;
990 	}
991 }
992 
993 static int
994 hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)
995 {
996 	int error, val;
997 
998 	val = hw_ssb_disable;
999 	error = sysctl_handle_int(oidp, &val, 0, req);
1000 	if (error != 0 || req->newptr == NULL)
1001 		return (error);
1002 	hw_ssb_disable = val;
1003 	hw_ssb_recalculate(true);
1004 	return (0);
1005 }
1006 SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
1007     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1008     hw_ssb_disable_handler, "I",
1009     "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
1010 
1011 SYSCTL_PROC(_machdep_mitigations_ssb, OID_AUTO, disable, CTLTYPE_INT |
1012     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1013     hw_ssb_disable_handler, "I",
1014     "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
1015 
1016 int hw_mds_disable;
1017 
1018 /*
1019  * Handler for Microarchitectural Data Sampling issues.  Really not a
1020  * pointer to a C function: on amd64 the code must not change any CPU
1021  * architectural state except possibly %rflags. Also, it is always
1022  * called with interrupts disabled.
1023  */
1024 void mds_handler_void(void);
1025 void mds_handler_verw(void);
1026 void mds_handler_ivb(void);
1027 void mds_handler_bdw(void);
1028 void mds_handler_skl_sse(void);
1029 void mds_handler_skl_avx(void);
1030 void mds_handler_skl_avx512(void);
1031 void mds_handler_silvermont(void);
1032 void (*mds_handler)(void) = mds_handler_void;
1033 
1034 static int
1035 sysctl_hw_mds_disable_state_handler(SYSCTL_HANDLER_ARGS)
1036 {
1037 	const char *state;
1038 
1039 	if (mds_handler == mds_handler_void)
1040 		state = "inactive";
1041 	else if (mds_handler == mds_handler_verw)
1042 		state = "VERW";
1043 	else if (mds_handler == mds_handler_ivb)
1044 		state = "software IvyBridge";
1045 	else if (mds_handler == mds_handler_bdw)
1046 		state = "software Broadwell";
1047 	else if (mds_handler == mds_handler_skl_sse)
1048 		state = "software Skylake SSE";
1049 	else if (mds_handler == mds_handler_skl_avx)
1050 		state = "software Skylake AVX";
1051 	else if (mds_handler == mds_handler_skl_avx512)
1052 		state = "software Skylake AVX512";
1053 	else if (mds_handler == mds_handler_silvermont)
1054 		state = "software Silvermont";
1055 	else
1056 		state = "unknown";
1057 	return (SYSCTL_OUT(req, state, strlen(state)));
1058 }
1059 
1060 SYSCTL_PROC(_hw, OID_AUTO, mds_disable_state,
1061     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1062     sysctl_hw_mds_disable_state_handler, "A",
1063     "Microarchitectural Data Sampling Mitigation state");
1064 
1065 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, mds,
1066     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1067     "Microarchitectural Data Sampling Mitigation state");
1068 
1069 SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, state,
1070     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1071     sysctl_hw_mds_disable_state_handler, "A",
1072     "Microarchitectural Data Sampling Mitigation state");
1073 
1074 _Static_assert(__offsetof(struct pcpu, pc_mds_tmp) % 64 == 0, "MDS AVX512");
1075 
1076 void
1077 hw_mds_recalculate(void)
1078 {
1079 	struct pcpu *pc;
1080 	vm_offset_t b64;
1081 	u_long xcr0;
1082 	int i;
1083 
1084 	/*
1085 	 * Allow the user to force the VERW variant even if MD_CLEAR is
1086 	 * not reported.  For instance, a hypervisor might unknowingly
1087 	 * filter the cap out.
1088 	 * For similar reasons, and for testing, allow the mitigation to
1089 	 * be enabled even when the MDS_NO cap is set.
1090 	 */
1091 	if (cpu_vendor_id != CPU_VENDOR_INTEL || hw_mds_disable == 0 ||
1092 	    ((cpu_ia32_arch_caps & IA32_ARCH_CAP_MDS_NO) != 0 &&
1093 	    hw_mds_disable == 3)) {
1094 		mds_handler = mds_handler_void;
1095 	} else if (((cpu_stdext_feature3 & CPUID_STDEXT3_MD_CLEAR) != 0 &&
1096 	    hw_mds_disable == 3) || hw_mds_disable == 1) {
1097 		mds_handler = mds_handler_verw;
1098 	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1099 	    (CPUID_TO_MODEL(cpu_id) == 0x2e || CPUID_TO_MODEL(cpu_id) == 0x1e ||
1100 	    CPUID_TO_MODEL(cpu_id) == 0x1f || CPUID_TO_MODEL(cpu_id) == 0x1a ||
1101 	    CPUID_TO_MODEL(cpu_id) == 0x2f || CPUID_TO_MODEL(cpu_id) == 0x25 ||
1102 	    CPUID_TO_MODEL(cpu_id) == 0x2c || CPUID_TO_MODEL(cpu_id) == 0x2d ||
1103 	    CPUID_TO_MODEL(cpu_id) == 0x2a || CPUID_TO_MODEL(cpu_id) == 0x3e ||
1104 	    CPUID_TO_MODEL(cpu_id) == 0x3a) &&
1105 	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1106 		/*
1107 		 * Nehalem, SandyBridge, IvyBridge
1108 		 */
1109 		CPU_FOREACH(i) {
1110 			pc = pcpu_find(i);
1111 			if (pc->pc_mds_buf == NULL) {
1112 				pc->pc_mds_buf = malloc_domainset(672, M_TEMP,
1113 				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
1114 				bzero(pc->pc_mds_buf, 16);
1115 			}
1116 		}
1117 		mds_handler = mds_handler_ivb;
1118 	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1119 	    (CPUID_TO_MODEL(cpu_id) == 0x3f || CPUID_TO_MODEL(cpu_id) == 0x3c ||
1120 	    CPUID_TO_MODEL(cpu_id) == 0x45 || CPUID_TO_MODEL(cpu_id) == 0x46 ||
1121 	    CPUID_TO_MODEL(cpu_id) == 0x56 || CPUID_TO_MODEL(cpu_id) == 0x4f ||
1122 	    CPUID_TO_MODEL(cpu_id) == 0x47 || CPUID_TO_MODEL(cpu_id) == 0x3d) &&
1123 	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1124 		/*
1125 		 * Haswell, Broadwell
1126 		 */
1127 		CPU_FOREACH(i) {
1128 			pc = pcpu_find(i);
1129 			if (pc->pc_mds_buf == NULL) {
1130 				pc->pc_mds_buf = malloc_domainset(1536, M_TEMP,
1131 				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
1132 				bzero(pc->pc_mds_buf, 16);
1133 			}
1134 		}
1135 		mds_handler = mds_handler_bdw;
1136 	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1137 	    ((CPUID_TO_MODEL(cpu_id) == 0x55 && (cpu_id &
1138 	    CPUID_STEPPING) <= 5) ||
1139 	    CPUID_TO_MODEL(cpu_id) == 0x4e || CPUID_TO_MODEL(cpu_id) == 0x5e ||
1140 	    (CPUID_TO_MODEL(cpu_id) == 0x8e && (cpu_id &
1141 	    CPUID_STEPPING) <= 0xb) ||
1142 	    (CPUID_TO_MODEL(cpu_id) == 0x9e && (cpu_id &
1143 	    CPUID_STEPPING) <= 0xc)) &&
1144 	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1145 		/*
1146 		 * Skylake, KabyLake, CoffeeLake, WhiskeyLake,
1147 		 * CascadeLake
1148 		 */
1149 		CPU_FOREACH(i) {
1150 			pc = pcpu_find(i);
1151 			if (pc->pc_mds_buf == NULL) {
1152 				pc->pc_mds_buf = malloc_domainset(6 * 1024,
1153 				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
1154 				    M_WAITOK);
1155 				b64 = (vm_offset_t)malloc_domainset(64 + 63,
1156 				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
1157 				    M_WAITOK);
1158 				pc->pc_mds_buf64 = (void *)roundup2(b64, 64);
1159 				bzero(pc->pc_mds_buf64, 64);
1160 			}
1161 		}
1162 		xcr0 = rxcr(0);
1163 		if ((xcr0 & XFEATURE_ENABLED_ZMM_HI256) != 0 &&
1164 		    (cpu_stdext_feature & CPUID_STDEXT_AVX512DQ) != 0)
1165 			mds_handler = mds_handler_skl_avx512;
1166 		else if ((xcr0 & XFEATURE_ENABLED_AVX) != 0 &&
1167 		    (cpu_feature2 & CPUID2_AVX) != 0)
1168 			mds_handler = mds_handler_skl_avx;
1169 		else
1170 			mds_handler = mds_handler_skl_sse;
1171 	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1172 	    ((CPUID_TO_MODEL(cpu_id) == 0x37 ||
1173 	    CPUID_TO_MODEL(cpu_id) == 0x4a ||
1174 	    CPUID_TO_MODEL(cpu_id) == 0x4c ||
1175 	    CPUID_TO_MODEL(cpu_id) == 0x4d ||
1176 	    CPUID_TO_MODEL(cpu_id) == 0x5a ||
1177 	    CPUID_TO_MODEL(cpu_id) == 0x5d ||
1178 	    CPUID_TO_MODEL(cpu_id) == 0x6e ||
1179 	    CPUID_TO_MODEL(cpu_id) == 0x65 ||
1180 	    CPUID_TO_MODEL(cpu_id) == 0x75 ||
1181 	    CPUID_TO_MODEL(cpu_id) == 0x1c ||
1182 	    CPUID_TO_MODEL(cpu_id) == 0x26 ||
1183 	    CPUID_TO_MODEL(cpu_id) == 0x27 ||
1184 	    CPUID_TO_MODEL(cpu_id) == 0x35 ||
1185 	    CPUID_TO_MODEL(cpu_id) == 0x36 ||
1186 	    CPUID_TO_MODEL(cpu_id) == 0x7a))) {
1187 		/* Silvermont, Airmont */
1188 		CPU_FOREACH(i) {
1189 			pc = pcpu_find(i);
1190 			if (pc->pc_mds_buf == NULL)
1191 				pc->pc_mds_buf = malloc(256, M_TEMP, M_WAITOK);
1192 		}
1193 		mds_handler = mds_handler_silvermont;
1194 	} else {
1195 		hw_mds_disable = 0;
1196 		mds_handler = mds_handler_void;
1197 	}
1198 }
1199 
1200 static void
1201 hw_mds_recalculate_boot(void *arg __unused)
1202 {
1203 
1204 	hw_mds_recalculate();
1205 }
1206 SYSINIT(mds_recalc, SI_SUB_SMP, SI_ORDER_ANY, hw_mds_recalculate_boot, NULL);
1207 
1208 static int
1209 sysctl_mds_disable_handler(SYSCTL_HANDLER_ARGS)
1210 {
1211 	int error, val;
1212 
1213 	val = hw_mds_disable;
1214 	error = sysctl_handle_int(oidp, &val, 0, req);
1215 	if (error != 0 || req->newptr == NULL)
1216 		return (error);
1217 	if (val < 0 || val > 3)
1218 		return (EINVAL);
1219 	hw_mds_disable = val;
1220 	hw_mds_recalculate();
1221 	return (0);
1222 }
1223 
1224 SYSCTL_PROC(_hw, OID_AUTO, mds_disable, CTLTYPE_INT |
1225     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1226     sysctl_mds_disable_handler, "I",
1227     "Microarchitectural Data Sampling Mitigation "
1228     "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");
1229 
1230 SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, disable, CTLTYPE_INT |
1231     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1232     sysctl_mds_disable_handler, "I",
1233     "Microarchitectural Data Sampling Mitigation "
1234     "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");
1235 
1236 /*
1237  * Intel Transactional Memory Asynchronous Abort Mitigation
1238  * CVE-2019-11135
1239  */
1240 int x86_taa_enable;
1241 int x86_taa_state;
1242 enum {
1243 	TAA_NONE	= 0,	/* No mitigation enabled */
1244 	TAA_TSX_DISABLE	= 1,	/* Disable TSX via MSR */
1245 	TAA_VERW	= 2,	/* Use VERW mitigation */
1246 	TAA_AUTO	= 3,	/* Automatically select the mitigation */
1247 
1248 	/* The states below are not selectable by the operator */
1249 
1250 	TAA_TAA_UC	= 4,	/* Mitigation present in microcode */
1251 	TAA_NOT_PRESENT	= 5	/* TSX is not present */
1252 };
1253 
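/*
 * Set or clear the TSX-disable bits (RTM_DISABLE and TSX_CPUID_CLEAR) in
 * IA32_TSX_CTRL, either locally or on all CPUs via rendezvous.
 */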
1254 static void
1255 taa_set(bool enable, bool all)
1256 {
1257 
1258 	x86_msr_op(MSR_IA32_TSX_CTRL,
1259 	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
1260 	    (all ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
1261 	    IA32_TSX_CTRL_RTM_DISABLE | IA32_TSX_CTRL_TSX_CPUID_CLEAR,
1262 	    NULL);
1263 }
1264 
1265 void
1266 x86_taa_recalculate(void)
1267 {
1268 	static int taa_saved_mds_disable = 0;
1269 	int taa_need = 0, taa_state = 0;
1270 	int mds_disable = 0, need_mds_recalc = 0;
1271 
1272 	/* Check CPUID.07h.EBX.HLE and RTM for the presence of TSX */
1273 	if ((cpu_stdext_feature & CPUID_STDEXT_HLE) == 0 ||
1274 	    (cpu_stdext_feature & CPUID_STDEXT_RTM) == 0) {
1275 		/* TSX is not present */
1276 		x86_taa_state = TAA_NOT_PRESENT;
1277 		return;
1278 	}
1279 
1280 	/* Check to see what mitigation options the CPU gives us */
1281 	if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TAA_NO) {
1282 		/* CPU is not susceptible to TAA */
1283 		taa_need = TAA_TAA_UC;
1284 	} else if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TSX_CTRL) {
1285 		/*
1286 		 * CPU can turn off TSX.  This is the next best option
1287 		 * if the TAA_NO hardware mitigation isn't present.
1288 		 */
1289 		taa_need = TAA_TSX_DISABLE;
1290 	} else {
1291 		/* No TSX/TAA specific remedies are available. */
1292 		if (x86_taa_enable == TAA_TSX_DISABLE) {
1293 			if (bootverbose)
1294 				printf("TSX control not available\n");
1295 			return;
1296 		} else
1297 			taa_need = TAA_VERW;
1298 	}
1299 
1300 	/* Can we automatically take action, or are we being forced? */
1301 	if (x86_taa_enable == TAA_AUTO)
1302 		taa_state = taa_need;
1303 	else
1304 		taa_state = x86_taa_enable;
1305 
1306 	/* No state change, nothing to do */
1307 	if (taa_state == x86_taa_state) {
1308 		if (bootverbose)
1309 			printf("No TSX change made\n");
1310 		return;
1311 	}
1312 
1313 	/* Does the MSR need to be turned on or off? */
1314 	if (taa_state == TAA_TSX_DISABLE)
1315 		taa_set(true, true);
1316 	else if (x86_taa_state == TAA_TSX_DISABLE)
1317 		taa_set(false, true);
1318 
1319 	/* Does MDS need to be set to turn on VERW? */
1320 	if (taa_state == TAA_VERW) {
1321 		taa_saved_mds_disable = hw_mds_disable;
1322 		mds_disable = hw_mds_disable = 1;
1323 		need_mds_recalc = 1;
1324 	} else if (x86_taa_state == TAA_VERW) {
1325 		mds_disable = hw_mds_disable = taa_saved_mds_disable;
1326 		need_mds_recalc = 1;
1327 	}
1328 	if (need_mds_recalc) {
1329 		hw_mds_recalculate();
1330 		if (mds_disable != hw_mds_disable) {
1331 			if (bootverbose)
1332 				printf("Cannot change MDS state for TAA\n");
1333 			/* Don't update our state */
1334 			return;
1335 		}
1336 	}
1337 
1338 	x86_taa_state = taa_state;
1339 	return;
1340 }
1341 
1342 static void
1343 taa_recalculate_boot(void * arg __unused)
1344 {
1345 
1346 	x86_taa_recalculate();
1347 }
1348 SYSINIT(taa_recalc, SI_SUB_SMP, SI_ORDER_ANY, taa_recalculate_boot, NULL);
1349 
1350 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, taa,
1351     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1352     "TSX Asynchronous Abort Mitigation");
1353 
1354 static int
1355 sysctl_taa_handler(SYSCTL_HANDLER_ARGS)
1356 {
1357 	int error, val;
1358 
1359 	val = x86_taa_enable;
1360 	error = sysctl_handle_int(oidp, &val, 0, req);
1361 	if (error != 0 || req->newptr == NULL)
1362 		return (error);
1363 	if (val < TAA_NONE || val > TAA_AUTO)
1364 		return (EINVAL);
1365 	x86_taa_enable = val;
1366 	x86_taa_recalculate();
1367 	return (0);
1368 }
1369 
1370 SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, enable, CTLTYPE_INT |
1371     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1372     sysctl_taa_handler, "I",
1373     "TAA Mitigation enablement control "
1374     "(0 - off, 1 - disable TSX, 2 - VERW, 3 - on AUTO)");
1375 
1376 static int
1377 sysctl_taa_state_handler(SYSCTL_HANDLER_ARGS)
1378 {
1379 	const char *state;
1380 
1381 	switch (x86_taa_state) {
1382 	case TAA_NONE:
1383 		state = "inactive";
1384 		break;
1385 	case TAA_TSX_DISABLE:
1386 		state = "TSX disabled";
1387 		break;
1388 	case TAA_VERW:
1389 		state = "VERW";
1390 		break;
1391 	case TAA_TAA_UC:
1392 		state = "Mitigated in microcode";
1393 		break;
1394 	case TAA_NOT_PRESENT:
1395 		state = "TSX not present";
1396 		break;
1397 	default:
1398 		state = "unknown";
1399 	}
1400 
1401 	return (SYSCTL_OUT(req, state, strlen(state)));
1402 }
1403 
1404 SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
1405     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1406     sysctl_taa_state_handler, "A",
1407     "TAA Mitigation state");
1408 
1409 int __read_frequently cpu_flush_rsb_ctxsw;
1410 SYSCTL_INT(_machdep_mitigations, OID_AUTO, flush_rsb_ctxsw,
1411     CTLFLAG_RW | CTLFLAG_NOFETCH, &cpu_flush_rsb_ctxsw, 0,
1412     "Flush Return Stack Buffer on context switch");
1413 
1414 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, rngds,
1415     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1416     "MCU Optimization, disable RDSEED mitigation");
1417 
1418 int x86_rngds_mitg_enable = 1;
1419 void
1420 x86_rngds_mitg_recalculate(bool all_cpus)
1421 {
1422 	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0)
1423 		return;
1424 	x86_msr_op(MSR_IA32_MCU_OPT_CTRL,
1425 	    (x86_rngds_mitg_enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
1426 	    (all_cpus ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
1427 	    IA32_RNGDS_MITG_DIS, NULL);
1428 }
1429 
1430 static int
1431 sysctl_rngds_mitg_enable_handler(SYSCTL_HANDLER_ARGS)
1432 {
1433 	int error, val;
1434 
1435 	val = x86_rngds_mitg_enable;
1436 	error = sysctl_handle_int(oidp, &val, 0, req);
1437 	if (error != 0 || req->newptr == NULL)
1438 		return (error);
1439 	x86_rngds_mitg_enable = val;
1440 	x86_rngds_mitg_recalculate(true);
1441 	return (0);
1442 }
1443 SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, enable, CTLTYPE_INT |
1444     CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1445     sysctl_rngds_mitg_enable_handler, "I",
1446     "MCU Optimization, disabling RDSEED mitigation control "
1447     "(0 - mitigation disabled (RDSEED optimized), 1 - mitigation enabled)");
1448 
1449 static int
1450 sysctl_rngds_state_handler(SYSCTL_HANDLER_ARGS)
1451 {
1452 	const char *state;
1453 
1454 	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0) {
1455 		state = "Not applicable";
1456 	} else if (x86_rngds_mitg_enable == 0) {
1457 		state = "RDSEED not serialized";
1458 	} else {
1459 		state = "Mitigated";
1460 	}
1461 	return (SYSCTL_OUT(req, state, strlen(state)));
1462 }
1463 SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, state,
1464     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1465     sysctl_rngds_state_handler, "A",
1466     "MCU Optimization state");
1467 
1468 /*
1469  * Enable and restore kernel text write permissions.
1470  * Callers must ensure that disable_wp()/restore_wp() are executed
1471  * on the same core, without rescheduling in between.
1472  */
1473 bool
1474 disable_wp(void)
1475 {
1476 	u_int cr0;
1477 
1478 	cr0 = rcr0();
1479 	if ((cr0 & CR0_WP) == 0)
1480 		return (false);
1481 	load_cr0(cr0 & ~CR0_WP);
1482 	return (true);
1483 }
1484 
1485 void
1486 restore_wp(bool old_wp)
1487 {
1488 
1489 	if (old_wp)
1490 		load_cr0(rcr0() | CR0_WP);
1491 }
1492 
1493 bool
1494 acpi_get_fadt_bootflags(uint16_t *flagsp)
1495 {
1496 #ifdef DEV_ACPI
1497 	ACPI_TABLE_FADT *fadt;
1498 	vm_paddr_t physaddr;
1499 
1500 	physaddr = acpi_find_table(ACPI_SIG_FADT);
1501 	if (physaddr == 0)
1502 		return (false);
1503 	fadt = acpi_map_table(physaddr, ACPI_SIG_FADT);
1504 	if (fadt == NULL)
1505 		return (false);
1506 	*flagsp = fadt->BootFlags;
1507 	acpi_unmap_table(fadt);
1508 	return (true);
1509 #else
1510 	return (false);
1511 #endif
1512 }
1513 
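/*
 * Resolve rdtsc_ordered() to the cheapest ordered TSC read the CPU
 * supports: RDTSCP when available, otherwise RDTSC preceded by MFENCE on
 * AMD/Hygon or LFENCE on other SSE2-capable CPUs, falling back to plain
 * RDTSC on CPUs without SSE2.
 */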
1514 DEFINE_IFUNC(, uint64_t, rdtsc_ordered, (void))
1515 {
1516 	bool cpu_is_amd = cpu_vendor_id == CPU_VENDOR_AMD ||
1517 	    cpu_vendor_id == CPU_VENDOR_HYGON;
1518 
1519 	if ((amd_feature & AMDID_RDTSCP) != 0)
1520 		return (rdtscp);
1521 	else if ((cpu_feature & CPUID_SSE2) != 0)
1522 		return (cpu_is_amd ? rdtsc_ordered_mfence :
1523 		    rdtsc_ordered_lfence);
1524 	else
1525 		return (rdtsc);
1526 }
1527