xref: /dragonfly/sys/platform/pc64/x86_64/initcpu.c (revision f2a91d31)
/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 * Copyright (c) 2008 The DragonFly Project.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

static int	hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");

int	cpu_type;		/* Are we 386, 386sx, 486, etc? */
u_int	cpu_feature;		/* Feature flags */
u_int	cpu_feature2;		/* Feature flags */
u_int	amd_feature;		/* AMD feature flags */
u_int	amd_feature2;		/* AMD feature flags */
u_int	via_feature_rng;	/* VIA RNG features */
u_int	via_feature_xcrypt;	/* VIA ACE features */
u_int	cpu_high;		/* Highest arg to CPUID */
u_int	cpu_exthigh;		/* Highest arg to extended CPUID */
u_int	cpu_id;			/* Stepping ID */
u_int	cpu_procinfo;		/* HyperThreading Info / Brand Index / CLFLUSH */
u_int	cpu_procinfo2;		/* Multicore info */
char	cpu_vendor[20];		/* CPU Origin code */
u_int	cpu_vendor_id;		/* CPU vendor ID */
u_int	cpu_fxsr;		/* SSE enabled */
u_int	cpu_xsave;		/* AVX enabled by OS */
u_int	cpu_mxcsr_mask;		/* Valid bits in mxcsr */
u_int	cpu_clflush_line_size = 32;	/* Default CLFLUSH line size */
u_int	cpu_stdext_feature;
u_int	cpu_thermal_feature;
u_int	cpu_mwait_feature;
u_int	cpu_mwait_extemu;

/*
 * -1: automatic (enable on h/w, disable on VMs)
 * 0: disable
 * 1: enable (where available)
 */
static int hw_clflush_enable = -1;

SYSCTL_INT(_hw, OID_AUTO, clflush_enable, CTLFLAG_RD, &hw_clflush_enable, 0,
	   "CLFLUSH support (-1: automatic, 0: disabled, 1: enabled)");

SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
	&via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
	&via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");

/*
 * Initialize special VIA C3/C7 features
 */
static void
init_via(void)
{
	u_int regs[4], val;
	u_int64_t msreg;

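	/*
	 * Centaur extended CPUID: leaf 0xc0000000 returns the highest
	 * Centaur leaf in EAX; leaf 0xc0000001 returns the PadLock
	 * feature flags in EDX.
	 */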
97 	do_cpuid(0xc0000000, regs);
98 	val = regs[0];
99 	if (val >= 0xc0000001) {
100 		do_cpuid(0xc0000001, regs);
101 		val = regs[3];
102 	} else
103 		val = 0;
104 
105 	/* Enable RNG if present and disabled */
106 	if (val & VIA_CPUID_HAS_RNG) {
107 		if (!(val & VIA_CPUID_DO_RNG)) {
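			/* Bit 6 (0x40) of MSR 0x110B switches the RNG on */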
108 			msreg = rdmsr(0x110B);
109 			msreg |= 0x40;
110 			wrmsr(0x110B, msreg);
111 		}
112 		via_feature_rng = VIA_HAS_RNG;
113 	}
114 	/* Enable AES engine if present and disabled */
115 	if (val & VIA_CPUID_HAS_ACE) {
116 		if (!(val & VIA_CPUID_DO_ACE)) {
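			/*
			 * Bit 28 of MSR 0x1107 (FCR) enables the xcrypt
			 * engines; the same bit is set for the ACE2, PHE
			 * and PMM units below.
			 */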
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_AES;
	}
	/* Enable ACE2 engine if present and disabled */
	if (val & VIA_CPUID_HAS_ACE2) {
		if (!(val & VIA_CPUID_DO_ACE2)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	}
	/* Enable SHA engine if present and disabled */
	if (val & VIA_CPUID_HAS_PHE) {
		if (!(val & VIA_CPUID_DO_PHE)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_SHA;
	}
	/* Enable MM engine if present and disabled */
	if (val & VIA_CPUID_HAS_PMM) {
		if (!(val & VIA_CPUID_DO_PMM)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_MM;
	}
}

static enum vmm_guest_type
detect_vmm(void)
{
	enum vmm_guest_type guest;
	char vendor[16];

	/*
	 * [RFC] CPUID usage for interaction between Hypervisors and Linux.
	 * http://lkml.org/lkml/2008/10/1/246
	 *
	 * KB1009458: Mechanisms to determine if software is running in
	 * a VMware virtual machine
	 * http://kb.vmware.com/kb/1009458
	 */
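	/* CPUID.1:ECX bit 31 (CPUID2_VMM) is the hypervisor-present bit */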
	if (cpu_feature2 & CPUID2_VMM) {
		u_int regs[4];

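		/*
		 * Leaf 0x40000000: EAX holds the highest hypervisor leaf,
		 * EBX:ECX:EDX the 12-byte vendor signature.
		 */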
		do_cpuid(0x40000000, regs);
		((u_int *)&vendor)[0] = regs[1];
		((u_int *)&vendor)[1] = regs[2];
		((u_int *)&vendor)[2] = regs[3];
		vendor[12] = '\0';
		if (regs[0] >= 0x40000000) {
			memcpy(vmm_vendor, vendor, 13);
			if (strcmp(vmm_vendor, "VMwareVMware") == 0)
				return VMM_GUEST_VMWARE;
			else if (strcmp(vmm_vendor, "Microsoft Hv") == 0)
				return VMM_GUEST_HYPERV;
			else if (strcmp(vmm_vendor, "KVMKVMKVM") == 0)
				return VMM_GUEST_KVM;
		} else if (regs[0] == 0) {
			/* Also detect old KVM versions with regs[0] == 0 */
			if (strcmp(vendor, "KVMKVMKVM") == 0) {
				memcpy(vmm_vendor, vendor, 13);
				return VMM_GUEST_KVM;
			}
		}
	}

	guest = detect_virtual();
	if (guest == VMM_GUEST_NONE && (cpu_feature2 & CPUID2_VMM))
		guest = VMM_GUEST_UNKNOWN;
	return guest;
}

/*
 * Initialize CPU control registers
 */
void
initializecpu(int cpu)
{
	uint64_t msr;

	/* Check for FXSR and SSE support and enable if available. */
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}

	if (cpu == 0) {
		/* Check if we are running in a hypervisor. */
		vmm_guest = detect_vmm();
	}

#if !defined(CPU_DISABLE_AVX)
	/* Check for XSAVE and AVX support and enable if available. */
	if ((cpu_feature2 & CPUID2_AVX) && (cpu_feature2 & CPUID2_XSAVE)
	     && (cpu_feature & CPUID_SSE)) {
		load_cr4(rcr4() | CR4_XSAVE);

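		/*
		 * XCR0 bit 0 (x87), bit 1 (SSE) and bit 2 (YMM) must all
		 * be set before AVX state can be used.
		 */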
		/* Adjust size of savefpu in npx.h before adding to mask. */
		xsetbv(0, CPU_XFEATURE_X87 | CPU_XFEATURE_SSE | CPU_XFEATURE_YMM, 0);
		cpu_xsave = 1;
	}
#endif

	if (cpu_vendor_id == CPU_VENDOR_AMD) {
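		/*
		 * Erratum 721 affects AMD family 10h and 12h parts;
		 * match them via the extended family/model bits of
		 * cpu_id.
		 */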
		switch((cpu_id & 0xFF0000)) {
		case 0x100000:
		case 0x120000:
			/*
			 * Errata 721 is the cpu bug found by yours truly
			 * (Matthew Dillon).  It is a bug where a sequence
			 * of 5 or more popq's + a retq, under certain
			 * deep recursion circumstances, can cause the %rsp
			 * to not be properly updated, almost always
			 * resulting in a seg-fault soon after.
			 *
			 * Do not install the workaround when we are running
			 * in a virtual machine.
			 */
			if (vmm_guest)
				break;

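			/* DE_CFG bit 0 enables the erratum 721 workaround */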
			msr = rdmsr(MSR_AMD_DE_CFG);
			if ((msr & 1) == 0) {
				if (cpu == 0)
					kprintf("Errata 721 workaround "
						"installed\n");
				msr |= 1;
				wrmsr(MSR_AMD_DE_CFG, msr);
			}
			break;
		}
	}

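	/* Enable no-execute page protection (EFER.NXE) when available */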
	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
#if 0 /* JG */
		pg_nx = PG_NX;
#endif
	}
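	/* VIA/Centaur family 6, model >= 0xf: C7 (Esther) and later cores */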
	if (cpu_vendor_id == CPU_VENDOR_CENTAUR &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    CPUID_TO_MODEL(cpu_id) >= 0xf)
		init_via();

	TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable);
	if (cpu_feature & CPUID_CLFSH) {
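		/* CPUID.1:EBX[15:8] gives the CLFLUSH line size in 8-byte units */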
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

		if (hw_clflush_enable == 0 ||
		    ((hw_clflush_enable == -1) && vmm_guest))
			cpu_feature &= ~CPUID_CLFSH;
	}
}