/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 * Copyright (c) 2008 The DragonFly Project.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */ 30 31 #include "opt_cpu.h" 32 33 #include <sys/param.h> 34 #include <sys/kernel.h> 35 #include <sys/systm.h> 36 #include <sys/sysctl.h> 37 38 #include <machine/cputypes.h> 39 #include <machine/md_var.h> 40 #include <machine/specialreg.h> 41 #include <machine/smp.h> 42 43 #include <vm/vm.h> 44 #include <vm/pmap.h> 45 46 static int hw_instruction_sse; 47 SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD, 48 &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU"); 49 50 int cpu; /* Are we 386, 386sx, 486, etc? */ 51 u_int cpu_feature; /* Feature flags */ 52 u_int cpu_feature2; /* Feature flags */ 53 u_int amd_feature; /* AMD feature flags */ 54 u_int amd_feature2; /* AMD feature flags */ 55 u_int via_feature_rng; /* VIA RNG features */ 56 u_int via_feature_xcrypt; /* VIA ACE features */ 57 u_int cpu_high; /* Highest arg to CPUID */ 58 u_int cpu_exthigh; /* Highest arg to extended CPUID */ 59 u_int cpu_id; /* Stepping ID */ 60 u_int cpu_procinfo; /* HyperThreading Info / Brand Index / CLFUSH */ 61 u_int cpu_procinfo2; /* Multicore info */ 62 char cpu_vendor[20]; /* CPU Origin code */ 63 u_int cpu_vendor_id; /* CPU vendor ID */ 64 u_int cpu_fxsr; /* SSE enabled */ 65 u_int cpu_xsave; /* AVX enabled by OS*/ 66 u_int cpu_mxcsr_mask; /* Valid bits in mxcsr */ 67 u_int cpu_clflush_line_size = 32; /* Default CLFLUSH line size */ 68 u_int cpu_stdext_feature; 69 70 /* 71 * -1: automatic (enable on h/w, disable on VMs) 72 * 0: disable 73 * 1: enable (where available) 74 */ 75 static int hw_clflush_enable = -1; 76 77 SYSCTL_INT(_hw, OID_AUTO, clflush_enable, CTLFLAG_RD, &hw_clflush_enable, 0, 78 ""); 79 80 SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD, 81 &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU"); 82 SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD, 83 &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU"); 84 85 /* 86 * Initialize special VIA C3/C7 features 87 */ 88 static void 89 init_via(void) 90 { 91 
u_int regs[4], val; 92 u_int64_t msreg; 93 94 do_cpuid(0xc0000000, regs); 95 val = regs[0]; 96 if (val >= 0xc0000001) { 97 do_cpuid(0xc0000001, regs); 98 val = regs[3]; 99 } else 100 val = 0; 101 102 /* Enable RNG if present and disabled */ 103 if (val & VIA_CPUID_HAS_RNG) { 104 if (!(val & VIA_CPUID_DO_RNG)) { 105 msreg = rdmsr(0x110B); 106 msreg |= 0x40; 107 wrmsr(0x110B, msreg); 108 } 109 via_feature_rng = VIA_HAS_RNG; 110 } 111 /* Enable AES engine if present and disabled */ 112 if (val & VIA_CPUID_HAS_ACE) { 113 if (!(val & VIA_CPUID_DO_ACE)) { 114 msreg = rdmsr(0x1107); 115 msreg |= (0x01 << 28); 116 wrmsr(0x1107, msreg); 117 } 118 via_feature_xcrypt |= VIA_HAS_AES; 119 } 120 /* Enable ACE2 engine if present and disabled */ 121 if (val & VIA_CPUID_HAS_ACE2) { 122 if (!(val & VIA_CPUID_DO_ACE2)) { 123 msreg = rdmsr(0x1107); 124 msreg |= (0x01 << 28); 125 wrmsr(0x1107, msreg); 126 } 127 via_feature_xcrypt |= VIA_HAS_AESCTR; 128 } 129 /* Enable SHA engine if present and disabled */ 130 if (val & VIA_CPUID_HAS_PHE) { 131 if (!(val & VIA_CPUID_DO_PHE)) { 132 msreg = rdmsr(0x1107); 133 msreg |= (0x01 << 28/**/); 134 wrmsr(0x1107, msreg); 135 } 136 via_feature_xcrypt |= VIA_HAS_SHA; 137 } 138 /* Enable MM engine if present and disabled */ 139 if (val & VIA_CPUID_HAS_PMM) { 140 if (!(val & VIA_CPUID_DO_PMM)) { 141 msreg = rdmsr(0x1107); 142 msreg |= (0x01 << 28/**/); 143 wrmsr(0x1107, msreg); 144 } 145 via_feature_xcrypt |= VIA_HAS_MM; 146 } 147 } 148 149 /* 150 * Initialize CPU control registers 151 */ 152 void 153 initializecpu(int cpu) 154 { 155 uint64_t msr; 156 157 /*Check for FXSR and SSE support and enable if available.*/ 158 if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) { 159 load_cr4(rcr4() | CR4_FXSR | CR4_XMM); 160 cpu_fxsr = hw_instruction_sse = 1; 161 } 162 163 /* Check if we are running in a hypervisor. 
*/ 164 if (cpu_feature2 & CPUID2_VMM) 165 vmm_guest = 1; 166 167 #if !defined(CPU_DISABLE_AVX) 168 /*Check for XSAVE and AVX support and enable if available.*/ 169 if ((cpu_feature2 & CPUID2_AVX) && (cpu_feature2 & CPUID2_XSAVE) 170 && (cpu_feature & CPUID_SSE)) { 171 load_cr4(rcr4() | CR4_XSAVE); 172 173 /* Adjust size of savefpu in npx.h before adding to mask.*/ 174 xsetbv(0, CPU_XFEATURE_X87 | CPU_XFEATURE_SSE | CPU_XFEATURE_YMM, 0); 175 cpu_xsave = 1; 176 } 177 #endif 178 179 if (cpu_vendor_id == CPU_VENDOR_AMD) { 180 switch((cpu_id & 0xFF0000)) { 181 case 0x100000: 182 case 0x120000: 183 /* 184 * Errata 721 is the cpu bug found by your's truly 185 * (Matthew Dillon). It is a bug where a sequence 186 * of 5 or more popq's + a retq, under involved 187 * deep recursion circumstances, can cause the %rsp 188 * to not be properly updated, almost always 189 * resulting in a seg-fault soon after. 190 * 191 * Do not install the workaround when we are running 192 * in a virtual machine. 193 */ 194 if (vmm_guest) 195 break; 196 197 msr = rdmsr(MSR_AMD_DE_CFG); 198 if ((msr & 1) == 0) { 199 if (cpu == 0) 200 kprintf("Errata 721 workaround " 201 "installed\n"); 202 msr |= 1; 203 wrmsr(MSR_AMD_DE_CFG, msr); 204 } 205 break; 206 } 207 } 208 209 if ((amd_feature & AMDID_NX) != 0) { 210 msr = rdmsr(MSR_EFER) | EFER_NXE; 211 wrmsr(MSR_EFER, msr); 212 #if JG 213 pg_nx = PG_NX; 214 #endif 215 } 216 if (cpu_vendor_id == CPU_VENDOR_CENTAUR && 217 CPUID_TO_FAMILY(cpu_id) == 0x6 && 218 CPUID_TO_MODEL(cpu_id) >= 0xf) 219 init_via(); 220 221 TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable); 222 if (cpu_feature & CPUID_CLFSH) { 223 cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8; 224 225 if (hw_clflush_enable == 0 || 226 ((hw_clflush_enable == -1) && vmm_guest)) 227 cpu_feature &= ~CPUID_CLFSH; 228 } 229 } 230