xref: /freebsd/sys/amd64/amd64/initcpu.c (revision ebaea1bc)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

static int	hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
static int	lower_sharedpage_init;
int		hw_lower_amd64_sharedpage;
SYSCTL_INT(_hw, OID_AUTO, lower_amd64_sharedpage, CTLFLAG_RDTUN,
    &hw_lower_amd64_sharedpage, 0,
    "Lower sharedpage to work around Ryzen issue with executing code near the top of user memory");
/*
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: force-disable CLFLUSH
 */
static int	hw_clflush_disable = -1;

static void
init_amd(void)
{
	uint64_t msr;

	/*
	 * C1E renders the local APIC timer dead, so we disable it by
	 * reading the Interrupt Pending Message register and clearing
	 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
	 *
	 * Reference:
	 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
	 *   #32559 revision 3.00+
	 *
	 * Detect the presence of the C1E capability, mostly on recent
	 * dual-core (and later) K8-family parts.  The affected model
	 * range is taken from Linux sources.
	 */
	if ((CPUID_TO_FAMILY(cpu_id) == 0xf ||
	    CPUID_TO_FAMILY(cpu_id) == 0x10) && (cpu_feature2 & CPUID2_HV) == 0)
		cpu_amdc1e_bug = 1;
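	/*
	 * Only the flag is latched here; the IPM register bits are
	 * cleared later, in the idle path (see the C1E handling in
	 * cpu_machdep.c).
	 */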

	/*
	 * Work around Erratum 721 for Family 10h and 12h processors.
	 * These processors may incorrectly update the stack pointer
	 * after a long series of push and/or near-call instructions,
	 * or a long series of pop and/or near-return instructions.
	 *
	 * http://support.amd.com/us/Processor_TechDocs/41322_10h_Rev_Gd.pdf
	 * http://support.amd.com/us/Processor_TechDocs/44739_12h_Rev_Gd.pdf
	 *
	 * Hypervisors do not provide access to the errata MSR, so an
	 * attempt to apply the workaround raises a #GP exception.  The
	 * MSR write must be done on the host and persists globally
	 * anyway, so do not try it when running under virtualization.
	 */
	switch (CPUID_TO_FAMILY(cpu_id)) {
	case 0x10:
	case 0x12:
		if ((cpu_feature2 & CPUID2_HV) == 0)
			wrmsr(MSR_DE_CFG, rdmsr(MSR_DE_CFG) |
			    DE_CFG_10H_12H_STACK_POINTER_JUMP_FIX_BIT);
		break;
	}

	/*
	 * BIOS may fail to set InitApicIdCpuIdLo to 1 as the BKDG
	 * requires, so do it here; otherwise some tools could be
	 * confused by the Initial Local APIC ID reported with CPUID
	 * function 1 in %ebx.
	 */
	if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
		if ((cpu_feature2 & CPUID2_HV) == 0) {
			msr = rdmsr(MSR_NB_CFG1);
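			/* Bit 54 is InitApicIdCpuIdLo. */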
			msr |= (uint64_t)1 << 54;
			wrmsr(MSR_NB_CFG1, msr);
		}
	}

	/*
	 * BIOS may configure Family 10h processors to convert WC+ cache type
	 * to CD.  That can hurt performance of guest VMs using nested paging.
	 * The relevant MSR bit is not documented in the BKDG;
	 * the fix is borrowed from Linux.
	 */
	if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
		if ((cpu_feature2 & CPUID2_HV) == 0) {
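			/*
			 * MSR 0xc001102a is the register known as
			 * BU_CFG2 in Linux sources; bit 24 governs the
			 * WC+ to CD conversion.
			 */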
			msr = rdmsr(0xc001102a);
			msr &= ~((uint64_t)1 << 24);
			wrmsr(0xc001102a, msr);
		}
	}

	/*
	 * Work around Erratum 793: Specific Combination of Writes to Write
	 * Combined Memory Types and Locked Instructions May Cause Core Hang.
	 * See Revision Guide for AMD Family 16h Models 00h-0Fh Processors,
	 * revision 3.04 or later, publication 51810.
	 */
	if (CPUID_TO_FAMILY(cpu_id) == 0x16 && CPUID_TO_MODEL(cpu_id) <= 0xf) {
		if ((cpu_feature2 & CPUID2_HV) == 0) {
			msr = rdmsr(MSR_LS_CFG);
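			/*
			 * Bit 15 is the workaround bit the revision
			 * guide prescribes for erratum 793.
			 */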
			msr |= (uint64_t)1 << 15;
			wrmsr(MSR_LS_CFG, msr);
		}
	}

	/* Ryzen errata. */
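	/*
	 * The numbers below refer to errata in AMD's Revision Guide for
	 * Family 17h Models 00h-0Fh Processors (publication 55449); each
	 * write sets a vendor-recommended workaround bit in a core
	 * configuration MSR.
	 */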
	if (CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1 &&
	    (cpu_feature2 & CPUID2_HV) == 0) {
		/* 1021 */
		msr = rdmsr(MSR_DE_CFG);
		msr |= DE_CFG_ZEN_LOAD_STALE_DATA_FIX_BIT;
		wrmsr(MSR_DE_CFG, msr);

		/* 1033 */
		msr = rdmsr(MSR_LS_CFG);
		msr |= 0x10;
		wrmsr(MSR_LS_CFG, msr);

		/* 1049 */
		msr = rdmsr(0xc0011028);
		msr |= 0x10;
		wrmsr(0xc0011028, msr);

		/* 1095 */
		msr = rdmsr(MSR_LS_CFG);
		msr |= 0x200000000000000;
		wrmsr(MSR_LS_CFG, msr);
	}

	/*
	 * Work around a problem on Ryzen that is triggered by executing
	 * code near the top of user memory, in our case the signal
	 * trampoline code in the shared page on amd64.
	 *
	 * This function is executed once for the BSP before tunables take
	 * effect, so the value determined here can be overridden by the
	 * tunable.  This function is then executed again for each AP and
	 * also on resume.  Set a flag the first time so that the value set
	 * by the tunable is not overwritten.
	 *
	 * Once AMD fixes this issue, the stepping and/or microcode version
	 * should be checked here so that the workaround is not used when
	 * it is no longer needed.
	 */
	if (lower_sharedpage_init == 0) {
		lower_sharedpage_init = 1;
		if (CPUID_TO_FAMILY(cpu_id) == 0x17 ||
		    CPUID_TO_FAMILY(cpu_id) == 0x18) {
			hw_lower_amd64_sharedpage = 1;
		}
	}

	/* Zenbleed.  See the comments in 'cpu_machdep.c'. */
	zenbleed_check_and_apply(false);
}

/*
 * Initialize special VIA features
 */
static void
init_via(void)
{
	u_int regs[4], val;

	/*
	 * Check extended CPUID for PadLock features.
	 *
	 * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
	 */
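	/* Leaf 0xc0000000 returns the highest Centaur leaf in %eax. */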
	do_cpuid(0xc0000000, regs);
	if (regs[0] >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
		val = regs[3];
	} else
		return;

	/* Enable RNG if present. */
	if ((val & VIA_CPUID_HAS_RNG) != 0) {
		via_feature_rng = VIA_HAS_RNG;
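		/* MSR 0x110b carries the PadLock RNG (xstore) enable bits. */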
		wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
	}

	/* Enable PadLock if present. */
	if ((val & VIA_CPUID_HAS_ACE) != 0)
		via_feature_xcrypt |= VIA_HAS_AES;
	if ((val & VIA_CPUID_HAS_ACE2) != 0)
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	if ((val & VIA_CPUID_HAS_PHE) != 0)
		via_feature_xcrypt |= VIA_HAS_SHA;
	if ((val & VIA_CPUID_HAS_PMM) != 0)
		via_feature_xcrypt |= VIA_HAS_MM;
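	/*
	 * Bit 28 of MSR 0x1107 (the VIA feature control register)
	 * turns on the PadLock execution units.
	 */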
	if (via_feature_xcrypt != 0)
		wrmsr(0x1107, rdmsr(0x1107) | (1 << 28));
}

/*
 * The value for the TSC_AUX MSR and rdtscp/rdpid on the invoking CPU.
 *
 * Caller should prevent CPU migration.
 */
u_int
cpu_auxmsr(void)
{
	KASSERT((read_rflags() & PSL_I) == 0, ("context switch possible"));
	return (PCPU_GET(cpuid));
}

void
cpu_init_small_core(void)
{
	u_int r[4];

	if (cpu_high < 0x1a)
		return;

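	/*
	 * CPUID leaf 0x1a reports the hybrid core type in bits 31:24
	 * of %eax; a small core is an Atom-class (E) core.
	 */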
	cpuid_count(0x1a, 0, r);
	if ((r[0] & CPUID_HYBRID_CORE_MASK) != CPUID_HYBRID_SMALL_CORE)
		return;

	PCPU_SET(small_core, 1);
	if (pmap_pcid_enabled && invpcid_works &&
	    pmap_pcid_invlpg_workaround_uena) {
		PCPU_SET(pcid_invlpg_workaround, 1);
		pmap_pcid_invlpg_workaround = 1;
	}
}

/*
 * Initialize CPU control registers
 */
void
initializecpu(void)
{
	uint64_t msr;
	uint32_t cr4;

	TSENTER();
	cr4 = rcr4();
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		cr4 |= CR4_FXSR | CR4_XMM;
		hw_instruction_sse = 1;
	}
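	/*
	 * CR4.FSGSBASE lets user code read and write the %fs and %gs
	 * base registers directly (rdfsbase and friends).
	 */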
	if (cpu_stdext_feature & CPUID_STDEXT_FSGSBASE)
		cr4 |= CR4_FSGSBASE;

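	/* CR4.PKE enables user-mode protection keys (PKRU). */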
	if (cpu_stdext_feature2 & CPUID_STDEXT2_PKU)
		cr4 |= CR4_PKE;

	/*
	 * If SMEP is present, we only need to flush the RSB (by default)
	 * on context switches, to prevent cross-process ret2spec
	 * attacks.  Do it automatically if ibrs_disable is set, to
	 * complete the mitigation.
	 *
	 * Postpone enabling SMEP on the boot CPU until the page
	 * tables are switched from the boot loader identity mapping
	 * to the kernel tables.  The boot loader enables the U bit in
	 * its tables.
	 */
	if (IS_BSP()) {
		if (cpu_stdext_feature & CPUID_STDEXT_SMEP &&
		    !TUNABLE_INT_FETCH(
		    "machdep.mitigations.cpu_flush_rsb_ctxsw",
		    &cpu_flush_rsb_ctxsw) &&
		    hw_ibrs_disable)
			cpu_flush_rsb_ctxsw = 1;
	} else {
		if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
			cr4 |= CR4_SMEP;
		if (cpu_stdext_feature & CPUID_STDEXT_SMAP)
			cr4 |= CR4_SMAP;
	}
	TSENTER2("load_cr4");
	load_cr4(cr4);
	TSEXIT2("load_cr4");
	/* Reload CPU extended features to reflect the %cr4 changes. */
	if (IS_BSP() && cold)
		identify_cpu_ext_features();
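	/* EFER.NXE makes the page-table NX bit (pg_nx) take effect. */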
	if (IS_BSP() && (amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
		pg_nx = PG_NX;
	}
	hw_ibrs_recalculate(false);
	hw_ssb_recalculate(false);
	amd64_syscall_ret_flush_l1d_recalc();
	x86_rngds_mitg_recalculate(false);
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_HYGON:
		init_amd();
		break;
	case CPU_VENDOR_CENTAUR:
		init_via();
		break;
	}

	if ((amd_feature & AMDID_RDTSCP) != 0 ||
	    (cpu_stdext_feature2 & CPUID_STDEXT2_RDPID) != 0)
		wrmsr(MSR_TSC_AUX, cpu_auxmsr());

	if (!IS_BSP())
		cpu_init_small_core();
	TSEXIT();
}

void
initializecpucache(void)
{

	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 * 	(Value * 8 = cache line size in bytes)
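	 * 	e.g., the common value 8 means a 64-byte cache line.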
	 */
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
	/*
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing APIC register window under virtualization
	 * environments.  These environments tend to disable the
	 * CPUID_SS feature even though the native CPU supports it.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}

	/*
	 * The kernel's use of CLFLUSH{,OPT} can be disabled manually
	 * by setting the hw.clflush_disable tunable.
	 */
	if (hw_clflush_disable == 1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}
}
383