/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 * Copyright (c) 2008 The DragonFly Project.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

extern int i8254_cputimer_disable;

static int tsc_ignore_cpuid = 0;
TUNABLE_INT("hw.tsc_ignore_cpuid", &tsc_ignore_cpuid);

static int hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");

int     cpu_type;               /* XXX CPU_CLAWHAMMER */
u_int   cpu_feature;            /* Feature flags */
u_int   cpu_feature2;           /* Feature flags */
u_int   amd_feature;            /* AMD feature flags */
u_int   amd_feature2;           /* AMD feature flags */
u_int   via_feature_rng;        /* VIA RNG features */
u_int   via_feature_xcrypt;     /* VIA ACE features */
u_int   cpu_high;               /* Highest arg to CPUID */
u_int   cpu_exthigh;            /* Highest arg to extended CPUID */
u_int   cpu_id;                 /* Stepping ID */
u_int   cpu_procinfo;           /* HyperThreading Info / Brand Index / CLFLUSH */
u_int   cpu_procinfo2;          /* Multicore info */
char    cpu_vendor[20];         /* CPU Origin code */
u_int   cpu_vendor_id;          /* CPU vendor ID */
u_int   cpu_fxsr;               /* SSE enabled */
u_int   cpu_xsave;              /* AVX enabled by OS */
u_int   cpu_mxcsr_mask;         /* Valid bits in mxcsr */
u_int   cpu_clflush_line_size = 32;     /* Default CLFLUSH line size */
u_int   cpu_stdext_feature;
u_int   cpu_thermal_feature;
u_int   cpu_mwait_feature;
u_int   cpu_mwait_extemu;

/*
 * -1: automatic (enable on h/w, disable on VMs)
 *  0: disable
 *  1: enable (where available)
 */
static int hw_clflush_enable = -1;

SYSCTL_INT(_hw, OID_AUTO, clflush_enable, CTLFLAG_RD, &hw_clflush_enable, 0,
    "CLFLUSH usage: -1 automatic, 0 disable, 1 enable");

SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
    &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
    &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");
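/*
 * NOTE: init_via() below enables the VIA PadLock units through two
 * model-specific registers: MSR 0x110B bit 6 gates the hardware RNG,
 * and MSR 0x1107 bit 28 gates the ACE/ACE2/PHE/PMM engines (summary
 * inferred from the rdmsr/wrmsr pairs in the code below).
 */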
/*
 * Initialize special VIA C3/C7 features.
 */
static void
init_via(void)
{
        u_int regs[4], val;
        u_int64_t msreg;

        do_cpuid(0xc0000000, regs);
        val = regs[0];
        if (val >= 0xc0000001) {
                do_cpuid(0xc0000001, regs);
                val = regs[3];
        } else
                val = 0;

        /* Enable RNG if present and disabled */
        if (val & VIA_CPUID_HAS_RNG) {
                if (!(val & VIA_CPUID_DO_RNG)) {
                        msreg = rdmsr(0x110B);
                        msreg |= 0x40;
                        wrmsr(0x110B, msreg);
                }
                via_feature_rng = VIA_HAS_RNG;
        }
        /* Enable AES engine if present and disabled */
        if (val & VIA_CPUID_HAS_ACE) {
                if (!(val & VIA_CPUID_DO_ACE)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_AES;
        }
        /* Enable ACE2 engine if present and disabled */
        if (val & VIA_CPUID_HAS_ACE2) {
                if (!(val & VIA_CPUID_DO_ACE2)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_AESCTR;
        }
        /* Enable SHA engine if present and disabled */
        if (val & VIA_CPUID_HAS_PHE) {
                if (!(val & VIA_CPUID_DO_PHE)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_SHA;
        }
        /* Enable MM engine if present and disabled */
        if (val & VIA_CPUID_HAS_PMM) {
                if (!(val & VIA_CPUID_DO_PMM)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_MM;
        }
}

static enum vmm_guest_type
detect_vmm(void)
{
        enum vmm_guest_type guest;
        char vendor[16];

        /*
         * [RFC] CPUID usage for interaction between Hypervisors and Linux.
         * http://lkml.org/lkml/2008/10/1/246
         *
         * KB1009458: Mechanisms to determine if software is running in
         * a VMware virtual machine
         * http://kb.vmware.com/kb/1009458
         */
        if (cpu_feature2 & CPUID2_VMM) {
                u_int regs[4];

                do_cpuid(0x40000000, regs);
                /* EBX, ECX and EDX carry the 12-byte vendor signature. */
                ((u_int *)&vendor)[0] = regs[1];
                ((u_int *)&vendor)[1] = regs[2];
                ((u_int *)&vendor)[2] = regs[3];
                vendor[12] = '\0';
                if (regs[0] >= 0x40000000) {
                        memcpy(vmm_vendor, vendor, 13);
                        if (strcmp(vmm_vendor, "VMwareVMware") == 0)
                                return VMM_GUEST_VMWARE;
                        else if (strcmp(vmm_vendor, "Microsoft Hv") == 0)
                                return VMM_GUEST_HYPERV;
                        else if (strcmp(vmm_vendor, "KVMKVMKVM") == 0)
                                return VMM_GUEST_KVM;
                } else if (regs[0] == 0) {
                        /* Also detect old KVM versions with regs[0] == 0 */
                        if (strcmp(vendor, "KVMKVMKVM") == 0) {
                                memcpy(vmm_vendor, vendor, 13);
                                return VMM_GUEST_KVM;
                        }
                }
        }

        guest = detect_virtual();
        if (guest == VMM_GUEST_NONE && (cpu_feature2 & CPUID2_VMM))
                guest = VMM_GUEST_UNKNOWN;
        return guest;
}
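/*
 * Example of the signature convention used by detect_vmm() above:
 * a KVM guest typically returns EBX = "KVMK", ECX = "VMKV" and
 * EDX = "M\0\0\0" from CPUID leaf 0x40000000, which reassembles into
 * the "KVMKVMKVM" vendor string.  (Illustrative values; each
 * hypervisor defines its own signature.)
 */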
/*
 * Initialize CPU control registers.
 */
void
initializecpu(int cpu)
{
        uint64_t msr;

        /* Check for FXSR and SSE support and enable if available. */
        if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
                load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
                cpu_fxsr = hw_instruction_sse = 1;
        }

        if (cpu == 0) {
                /* Check if we are running in a hypervisor. */
                vmm_guest = detect_vmm();
        }

#if !defined(CPU_DISABLE_AVX)
        /* Check for XSAVE and AVX support and enable if available. */
        if ((cpu_feature2 & CPUID2_AVX) && (cpu_feature2 & CPUID2_XSAVE) &&
            (cpu_feature & CPUID_SSE)) {
                load_cr4(rcr4() | CR4_XSAVE);

                /* Adjust the size of savefpu in npx.h before adding to the mask. */
                xsetbv(0, CPU_XFEATURE_X87 | CPU_XFEATURE_SSE |
                    CPU_XFEATURE_YMM, 0);
                cpu_xsave = 1;
        }
#endif

        if (cpu_vendor_id == CPU_VENDOR_AMD) {
                switch (cpu_id & 0xFF0000) {
                case 0x100000:
                case 0x120000:
                        /*
                         * Errata 721 is the cpu bug found by yours truly
                         * (Matthew Dillon).  It is a bug where a sequence
                         * of 5 or more popq's + a retq, under involved
                         * deep recursion circumstances, can cause %rsp to
                         * not be properly updated, almost always resulting
                         * in a seg-fault soon after.
                         *
                         * Do not install the workaround when we are running
                         * in a virtual machine.
                         */
                        if (vmm_guest)
                                break;

                        msr = rdmsr(MSR_AMD_DE_CFG);
                        if ((msr & 1) == 0) {
                                if (cpu == 0)
                                        kprintf("Errata 721 workaround "
                                                "installed\n");
                                msr |= 1;
                                wrmsr(MSR_AMD_DE_CFG, msr);
                        }
                        break;
                }

                /*
                 * The BIOS may fail to set InitApicIdCpuIdLo to 1 as it
                 * should per the BKDG.  So, do it here; otherwise some
                 * tools could be confused by the Initial Local APIC ID
                 * reported with CPUID Function 1 in EBX.
                 */
                if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
                        if ((cpu_feature2 & CPUID2_VMM) == 0) {
                                msr = rdmsr(0xc001001f);
                                msr |= (uint64_t)1 << 54;
                                wrmsr(0xc001001f, msr);
                        }
                }

                /*
                 * The BIOS may configure Family 10h processors to convert
                 * the WC+ cache type to CD.  That can hurt the performance
                 * of guest VMs using nested paging.
                 *
                 * The relevant MSR bit is not documented in the BKDG;
                 * the fix is borrowed from Linux.
                 */
                if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
                        if ((cpu_feature2 & CPUID2_VMM) == 0) {
                                msr = rdmsr(0xc001102a);
                                msr &= ~((uint64_t)1 << 24);
                                wrmsr(0xc001102a, msr);
                        }
                }

                /*
                 * Work around Erratum 793: Specific Combination of Writes
                 * to Write Combined Memory Types and Locked Instructions
                 * May Cause Core Hang.  See the Revision Guide for AMD
                 * Family 16h Models 00h-0Fh Processors, revision 3.04 or
                 * later, publication 51810.
                 */
                if (CPUID_TO_FAMILY(cpu_id) == 0x16 &&
                    CPUID_TO_MODEL(cpu_id) <= 0xf) {
                        if ((cpu_feature2 & CPUID2_VMM) == 0) {
                                msr = rdmsr(0xc0011020);
                                msr |= (uint64_t)1 << 15;
                                wrmsr(0xc0011020, msr);
                        }
                }
        }

        if ((amd_feature & AMDID_NX) != 0) {
                msr = rdmsr(MSR_EFER) | EFER_NXE;
                wrmsr(MSR_EFER, msr);
#if 0 /* JG */
                pg_nx = PG_NX;
#endif
        }
        if (cpu_vendor_id == CPU_VENDOR_CENTAUR &&
            CPUID_TO_FAMILY(cpu_id) == 0x6 &&
            CPUID_TO_MODEL(cpu_id) >= 0xf)
                init_via();

        TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable);
        if (cpu_feature & CPUID_CLFSH) {
                /* CPUID.1 EBX[15:8] gives the line size in 8-byte units. */
                cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

                if (hw_clflush_enable == 0 ||
                    ((hw_clflush_enable == -1) && vmm_guest))
                        cpu_feature &= ~CPUID_CLFSH;
        }

        /* Set the TSC_AUX register to the cpuid, for using rdtscp in userland. */
        if ((amd_feature & AMDID_RDTSCP) != 0)
                wrmsr(MSR_TSCAUX, cpu);
}
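/*
 * CPUID leaf 0x15 returns EAX = denominator and EBX = numerator of the
 * TSC/crystal ratio, and ECX = the core crystal clock in Hz (0 when not
 * enumerated), so:
 *
 *      TSC frequency = crystal * EBX / EAX
 *
 * As a purely illustrative example, a CPU reporting a 24 MHz crystal
 * and a 216/2 ratio would run its TSC at 24000000 * 216 / 2 =
 * 2592000000 Hz (2.592 GHz).
 */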
/*
 * This method should be at least as good as calibrating the TSC against
 * the HPET timer, since the HPET apparently runs off the same core
 * crystal clock.
 */
static void
detect_tsc_frequency(void)
{
        int cpu_family, cpu_model;
        u_int regs[4];
        uint64_t crystal = 0;

        cpu_model = CPUID_TO_MODEL(cpu_id);
        cpu_family = CPUID_TO_FAMILY(cpu_id);

        if (cpu_vendor_id != CPU_VENDOR_INTEL)
                return;

        if (cpu_high < 0x15)
                return;

        do_cpuid(0x15, regs);
        if (regs[0] == 0 || regs[1] == 0)
                return;

        if (regs[2] == 0) {
                /* For some families the SDM gives the core crystal clock. */
                if (cpu_family == 0x6) {
                        switch (cpu_model) {
                        case 0x55:      /* Xeon Scalable */
                                crystal = 25000000;     /* 25 MHz */
                                break;
                        /* Skylake */
                        case 0x4e:
                        case 0x5e:
                        /* Kabylake/Coffeelake */
                        case 0x8e:
                        case 0x9e:
                                crystal = 24000000;     /* 24 MHz */
                                break;
                        case 0x5c:      /* Goldmont Atom */
                                crystal = 19200000;     /* 19.2 MHz */
                                break;
                        default:
                                break;
                        }
                }
        } else {
                crystal = regs[2];
        }

        if (crystal == 0)
                return;

        kprintf("TSC crystal clock: %ju Hz, TSC/crystal ratio: %u/%u\n",
            (uintmax_t)crystal, regs[1], regs[0]);

        if (tsc_ignore_cpuid == 0) {
                tsc_frequency = (crystal * regs[1]) / regs[0];
                i8254_cputimer_disable = 1;
        }
}

TIMECOUNTER_INIT(cpuid_tsc_frequency, detect_tsc_frequency);
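/*
 * Setting hw.tsc_ignore_cpuid="1" in /boot/loader.conf makes the routine
 * above report the detected crystal clock without setting tsc_frequency
 * or disabling the i8254 cputimer, falling back to the normal boot-time
 * TSC calibration.
 */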