/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 * Copyright (c) 2008 The DragonFly Project.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

/* Nonzero once FXSR+SSE have been enabled in initializecpu(). */
static int hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");

/*
 * CPU identification state, filled in by the early identification code
 * (elsewhere) from CPUID leaves and consumed here and across the kernel.
 */
int	cpu;			/* Are we 386, 386sx, 486, etc? */
u_int	cpu_feature;		/* Feature flags (CPUID.1:EDX) */
u_int	cpu_feature2;		/* Feature flags (CPUID.1:ECX) */
u_int	amd_feature;		/* AMD feature flags */
u_int	amd_feature2;		/* AMD feature flags */
u_int	via_feature_rng;	/* VIA RNG features */
u_int	via_feature_xcrypt;	/* VIA ACE features */
u_int	cpu_high;		/* Highest arg to CPUID */
u_int	cpu_exthigh;		/* Highest arg to extended CPUID */
u_int	cpu_id;			/* Stepping ID */
u_int	cpu_procinfo;		/* HyperThreading Info / Brand Index / CLFUSH */
u_int	cpu_procinfo2;		/* Multicore info */
char	cpu_vendor[20];		/* CPU Origin code */
u_int	cpu_vendor_id;		/* CPU vendor ID */
u_int	cpu_fxsr;		/* SSE enabled */
u_int	cpu_xsave;		/* AVX enabled by OS */
u_int	cpu_mxcsr_mask;		/* Valid bits in mxcsr */
u_int	cpu_clflush_line_size = 32;	/* Default CLFLUSH line size */

/*
 * hw.clflush_enable tunable:
 * -1: automatic (enable on h/w, disable on VMs)
 *  0: disable
 *  1: enable (where available)
 */
static int hw_clflush_enable = -1;

SYSCTL_INT(_hw, OID_AUTO, clflush_enable, CTLFLAG_RD, &hw_clflush_enable, 0,
    "");

SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
    &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
    &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");

/*
 * Initialize special VIA C3/C7 features (PadLock RNG and crypto engines).
 *
 * Probes the VIA-specific CPUID leaf 0xc0000001 for feature/enabled bit
 * pairs (HAS_*/DO_*).  For each engine that is present but not already
 * enabled, the corresponding enable bit is set in a model-specific
 * register before the feature is advertised through via_feature_rng /
 * via_feature_xcrypt.
 *
 * NOTE(review): MSR 0x110B bit 6 (0x40) is presumably the RNG-enable
 * bit, and MSR 0x1107 (FCR) bit 28 the PadLock/ACE-enable bit that
 * gates all of the xcrypt engines (AES, AES-CTR, SHA, MM) — which would
 * explain why the same bit is set in all four branches below.  Confirm
 * against the VIA PadLock programming guide.
 */
static void
init_via(void)
{
	u_int regs[4], val;
	u_int64_t msreg;

	/*
	 * Leaf 0xc0000000 returns the highest supported VIA leaf; only
	 * query 0xc0000001 (feature flags in EDX) when it exists.
	 */
	do_cpuid(0xc0000000, regs);
	val = regs[0];
	if (val >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
		val = regs[3];
	} else
		val = 0;

	/* Enable RNG if present and disabled */
	if (val & VIA_CPUID_HAS_RNG) {
		if (!(val & VIA_CPUID_DO_RNG)) {
			msreg = rdmsr(0x110B);
			msreg |= 0x40;
			wrmsr(0x110B, msreg);
		}
		via_feature_rng = VIA_HAS_RNG;
	}
	/* Enable AES engine if present and disabled */
	if (val & VIA_CPUID_HAS_ACE) {
		if (!(val & VIA_CPUID_DO_ACE)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_AES;
	}
	/* Enable ACE2 engine if present and disabled */
	if (val & VIA_CPUID_HAS_ACE2) {
		if (!(val & VIA_CPUID_DO_ACE2)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	}
	/* Enable SHA engine if present and disabled */
	if (val & VIA_CPUID_HAS_PHE) {
		if (!(val & VIA_CPUID_DO_PHE)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_SHA;
	}
	/* Enable MM engine if present and disabled */
	if (val & VIA_CPUID_HAS_PMM) {
		if (!(val & VIA_CPUID_DO_PMM)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_MM;
	}
}

/*
 * Initialize CPU control registers.
 *
 * Called per-CPU during boot.  In order it:
 *   - enables FXSR/SSE via CR4 when both CPUID bits are set;
 *   - enables XSAVE/AVX (CR4.OSXSAVE + XCR0 x87/SSE/YMM) unless built
 *     with CPU_DISABLE_AVX;
 *   - installs the AMD Errata 721 workaround on family 10h/12h parts;
 *   - enables the NX (no-execute) capability via EFER.NXE;
 *   - runs VIA PadLock setup on Centaur family 6, model >= 0xf;
 *   - records hypervisor presence (CPUID.1:ECX hypervisor bit) in
 *     vmm_guest;
 *   - derives the CLFLUSH line size from CPUID.1:EBX[15:8] (reported in
 *     8-byte units), and masks CPUID_CLFSH off when disabled by the
 *     hw.clflush_enable tunable or (by default) when running in a VM.
 */
void
initializecpu(void)
{
	uint64_t msr;

	/* Check for FXSR and SSE support and enable if available. */
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}

#if !defined(CPU_DISABLE_AVX)
	/* Check for XSAVE and AVX support and enable if available. */
	if ((cpu_feature2 & CPUID2_AVX) && (cpu_feature2 & CPUID2_XSAVE)
	    && (cpu_feature & CPUID_SSE)) {
		load_cr4(rcr4() | CR4_XSAVE);

		/* Adjust size of savefpu in npx.h before adding to mask. */
		xsetbv(0, CPU_XFEATURE_X87 | CPU_XFEATURE_SSE |
		    CPU_XFEATURE_YMM, 0);
		cpu_xsave = 1;
	}
#endif

	if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch((cpu_id & 0xFF0000)) {
		case 0x100000:
		case 0x120000:
			/*
			 * Errata 721 is the cpu bug found by your's truly
			 * (Matthew Dillon).  It is a bug where a sequence
			 * of 5 or more popq's + a retq, under involved
			 * deep recursion circumstances, can cause the %rsp
			 * to not be properly updated, almost always
			 * resulting in a seg-fault soon after.
			 *
			 * Setting bit 0 of this MSR activates the
			 * workaround; only write it when not already set.
			 */
			msr = rdmsr(0xc0011029);
			if ((msr & 1) == 0) {
				kprintf("Errata 721 workaround installed\n");
				msr |= 1;
				wrmsr(0xc0011029, msr);
			}
			break;
		}
	}

	/* Enable no-execute page protection when the CPU supports it. */
	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
#if JG
		pg_nx = PG_NX;
#endif
	}
	/* VIA C3/C7 (Centaur family 6, model >= 0xf) PadLock setup. */
	if (cpu_vendor_id == CPU_VENDOR_CENTAUR &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    CPUID_TO_MODEL(cpu_id) >= 0xf)
		init_via();

	if (cpu_feature2 & CPUID2_VMM)
		vmm_guest = 1;

	TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable);
	if (cpu_feature & CPUID_CLFSH) {
		/* CPUID.1:EBX[15:8] gives the line size in 8-byte units. */
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

		/*
		 * Hide CLFLUSH from the rest of the kernel when disabled
		 * by the tunable, or by default inside a hypervisor.
		 */
		if (hw_clflush_enable == 0 ||
		    ((hw_clflush_enable == -1) && vmm_guest))
			cpu_feature &= ~CPUID_CLFSH;
	}
}