xref: /dragonfly/sys/platform/pc64/x86_64/identcpu.c (revision 17183580)
1 /*-
2  * Copyright (c) 1992 Terrence R. Lambert.
3  * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
4  * Copyright (c) 1997 KATO Takenori.
5  * Copyright (c) 2008 The DragonFly Project.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * William Jolitz.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the University of
22  *	California, Berkeley and its contributors.
23  * 4. Neither the name of the University nor the names of its contributors
24  *    may be used to endorse or promote products derived from this software
25  *    without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * from: Id: machdep.c,v 1.193 1996/06/18 01:22:04 bde Exp
40  */
41 
42 #include "opt_cpu.h"
43 
44 #include <sys/param.h>
45 #include <sys/bus.h>
46 #include <sys/eventhandler.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/sysctl.h>
50 #include <sys/power.h>
51 
52 #include <machine/asmacros.h>
53 #include <machine/clock.h>
54 #include <machine/cputypes.h>
55 #include <machine/frame.h>
56 #include <machine/segments.h>
57 #include <machine/specialreg.h>
58 #include <machine/md_var.h>
59 #include <machine/npx.h>
60 
61 /* XXX - should be in header file: */
62 void printcpuinfo(void);
63 void identify_cpu(void);
64 void earlysetcpuclass(void);
65 void panicifcpuunsupported(void);
66 
67 static u_int find_cpu_vendor_id(void);
68 static void print_AMD_info(void);
69 static void print_AMD_assoc(int i);
70 static void print_via_padlock_info(void);
71 static void print_xsave_info(void);
72 
73 int	cpu_class;
74 char machine[] = "x86_64";
75 SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD,
76     machine, 0, "Machine class");
77 
78 static char cpu_model[128];
79 SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD,
80     cpu_model, 0, "Machine model");
81 
82 static int hw_clockrate;
83 SYSCTL_INT(_hw, OID_AUTO, clockrate, CTLFLAG_RD,
84     &hw_clockrate, 0, "CPU instruction clock rate");
85 
86 static char cpu_brand[48];
87 
88 static struct {
89 	char	*cpu_name;
90 	int	cpu_class;
91 } x86_64_cpus[] = {
92 	{ "Clawhammer",		CPUCLASS_K8 },		/* CPU_CLAWHAMMER */
93 	{ "Sledgehammer",	CPUCLASS_K8 },		/* CPU_SLEDGEHAMMER */
94 };
95 
96 static struct {
97 	char	*vendor;
98 	u_int	vendor_id;
99 } cpu_vendors[] = {
100 	{ INTEL_VENDOR_ID,	CPU_VENDOR_INTEL },	/* GenuineIntel */
101 	{ AMD_VENDOR_ID,	CPU_VENDOR_AMD },	/* AuthenticAMD */
102 	{ CENTAUR_VENDOR_ID,	CPU_VENDOR_CENTAUR },	/* CentaurHauls */
103 };
104 
105 #ifdef foo
106 static int cpu_cores;
107 static int cpu_logical;
108 #endif
109 
110 void
111 printcpuinfo(void)
112 {
113 	u_int regs[4], i;
114 	char *brand;
115 
116 	cpu_class = x86_64_cpus[cpu_type].cpu_class;
117 	kprintf("CPU: ");
118 	strncpy(cpu_model, x86_64_cpus[cpu_type].cpu_name, sizeof (cpu_model));
119 
120 	/* Check for extended CPUID information and a processor name. */
121 	if (cpu_exthigh >= 0x80000004) {
122 		brand = cpu_brand;
123 		for (i = 0x80000002; i < 0x80000005; i++) {
124 			do_cpuid(i, regs);
125 			memcpy(brand, regs, sizeof(regs));
126 			brand += sizeof(regs);
127 		}
128 	}
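	/*
	 * Illustrative unrolled form of the loop above (not compiled in):
	 * each of the extended leaves 0x80000002..0x80000004 returns 16
	 * bytes of the brand string in EAX..EDX, filling the 48-byte
	 * cpu_brand buffer (NUL terminator included).
	 */
#if 0
	do_cpuid(0x80000002, regs);
	memcpy(cpu_brand +  0, regs, 16);
	do_cpuid(0x80000003, regs);
	memcpy(cpu_brand + 16, regs, 16);
	do_cpuid(0x80000004, regs);
	memcpy(cpu_brand + 32, regs, 16);
#endif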
129 
130 	switch (cpu_vendor_id) {
131 	case CPU_VENDOR_INTEL:
132 		/* Please make up your mind folks! */
133 		strcat(cpu_model, "EM64T");
134 		break;
135 	case CPU_VENDOR_AMD:
136 		/*
137 		 * Values taken from AMD Processor Recognition
138 		 * http://www.amd.com/K6/k6docs/pdf/20734g.pdf
139 		 * (also describes ``Features'' encodings).
140 		 */
141 		strcpy(cpu_model, "AMD ");
142 		if ((cpu_id & 0xf00) == 0xf00)
143 			strcat(cpu_model, "AMD64 Processor");
144 		else
145 			strcat(cpu_model, "Unknown");
146 		break;
147 	case CPU_VENDOR_CENTAUR:
148 		strcpy(cpu_model, "VIA ");
149 		if ((cpu_id & 0xff0) == 0x6f0)
150 			strcat(cpu_model, "Nano Processor");
151 		else
152 			strcat(cpu_model, "Unknown");
153 		break;
154 	default:
155 		strcat(cpu_model, "Unknown");
156 		break;
157 	}
158 
159 	/*
160 	 * Replace cpu_model with cpu_brand minus leading spaces if
161 	 * we have one.
162 	 */
163 	brand = cpu_brand;
164 	while (*brand == ' ')
165 		++brand;
166 	if (*brand != '\0')
167 		strcpy(cpu_model, brand);
168 
169 	kprintf("%s (", cpu_model);
170 	switch(cpu_class) {
171 	case CPUCLASS_K8:
172 		hw_clockrate = (tsc_frequency + 5000) / 1000000;
173 		kprintf("%jd.%02d-MHz ",
174 		       (intmax_t)(tsc_frequency + 4999) / 1000000,
175 		       (u_int)((tsc_frequency + 4999) / 10000) % 100);
176 		kprintf("K8");
177 		break;
178 	default:
179 		kprintf("Unknown");	/* will panic below... */
180 	}
181 	kprintf("-class CPU)\n");
182 	if (*cpu_vendor)
183 		kprintf("  Origin=\"%s\"", cpu_vendor);
184 	if (cpu_id)
185 		kprintf("  Id=0x%x", cpu_id);
186 
187 	if (cpu_vendor_id == CPU_VENDOR_INTEL ||
188 	    cpu_vendor_id == CPU_VENDOR_AMD ||
189 	    cpu_vendor_id == CPU_VENDOR_CENTAUR) {
190 		kprintf("  Family=0x%x", CPUID_TO_FAMILY(cpu_id));
191 		kprintf("  Model=0x%x", CPUID_TO_MODEL(cpu_id));
192 		kprintf("  Stepping=%u", cpu_id & CPUID_STEPPING);
193 		if (cpu_high > 0) {
194 #if 0
195 			u_int cmp = 1, htt = 1;
196 #endif
197 
198 			/*
199 			 * Here we should probably set up flags indicating
200 			 * whether or not various features are available.
201 			 * The interesting ones are probably VME, PSE, PAE,
202 			 * and PGE.  The code already assumes without bothering
203 			 * to check that all CPUs >= Pentium have a TSC and
204 			 * MSRs.
205 			 */
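			/*
			 * A minimal sketch of such checks (not compiled in);
			 * the have_* names are hypothetical and not used
			 * elsewhere in this file.
			 */
#if 0
			int have_vme = (cpu_feature & CPUID_VME) != 0;
			int have_pse = (cpu_feature & CPUID_PSE) != 0;
			int have_pae = (cpu_feature & CPUID_PAE) != 0;
			int have_pge = (cpu_feature & CPUID_PGE) != 0;
#endif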
206 			kprintf("\n  Features=0x%pb%i",
207 			"\020"
208 			"\001FPU"	/* Integral FPU */
209 			"\002VME"	/* Extended VM86 mode support */
210 			"\003DE"	/* Debugging Extensions (CR4.DE) */
211 			"\004PSE"	/* 4MByte page tables */
212 			"\005TSC"	/* Timestamp counter */
213 			"\006MSR"	/* Machine specific registers */
214 			"\007PAE"	/* Physical address extension */
215 			"\010MCE"	/* Machine Check support */
216 			"\011CX8"	/* CMPXCHG8B instruction */
217 			"\012APIC"	/* SMP local APIC */
218 			"\013oldMTRR"	/* Previous implementation of MTRR */
219 			"\014SEP"	/* Fast System Call */
220 			"\015MTRR"	/* Memory Type Range Registers */
221 			"\016PGE"	/* PG_G (global bit) support */
222 			"\017MCA"	/* Machine Check Architecture */
223 			"\020CMOV"	/* CMOV instruction */
224 			"\021PAT"	/* Page attributes table */
225 			"\022PSE36"	/* 36 bit address space support */
226 			"\023PN"	/* Processor Serial number */
227 			"\024CLFLUSH"	/* Has the CLFLUSH instruction */
228 			"\025<b20>"
229 			"\026DTS"	/* Debug Trace Store */
230 			"\027ACPI"	/* ACPI support */
231 			"\030MMX"	/* MMX instructions */
232 			"\031FXSR"	/* FXSAVE/FXRSTOR */
233 			"\032SSE"	/* Streaming SIMD Extensions */
234 			"\033SSE2"	/* Streaming SIMD Extensions #2 */
235 			"\034SS"	/* Self snoop */
236 			"\035HTT"	/* Hyperthreading (see EBX bit 16-23) */
237 			"\036TM"	/* Thermal Monitor clock slowdown */
238 			"\037IA64"	/* CPU can execute IA64 instructions */
239 			"\040PBE"	/* Pending Break Enable */
240 			, cpu_feature);
241 
242 			if (cpu_feature2 != 0) {
243 				kprintf("\n  Features2=0x%pb%i",
244 				"\020"
245 				"\001SSE3"	/* SSE3 */
246 				"\002PCLMULQDQ"	/* Carry-Less Mul Quadword */
247 				"\003DTES64"	/* 64-bit Debug Trace */
248 				"\004MON"	/* MONITOR/MWAIT Instructions */
249 				"\005DS_CPL"	/* CPL Qualified Debug Store */
250 				"\006VMX"	/* Virtual Machine Extensions */
251 				"\007SMX"	/* Safer Mode Extensions */
252 				"\010EST"	/* Enhanced SpeedStep */
253 				"\011TM2"	/* Thermal Monitor 2 */
254 				"\012SSSE3"	/* SSSE3 */
255 				"\013CNXT-ID"	/* L1 context ID available */
256 				"\014SDBG"	/* IA-32 silicon debug */
257 				"\015FMA"	/* Fused Multiply Add */
258 				"\016CX16"	/* CMPXCHG16B Instruction */
259 				"\017xTPR"	/* Send Task Priority Messages */
260 				"\020PDCM"	/* Perf/Debug Capability MSR */
261 				"\021<b16>"
262 				"\022PCID"	/* Process-context Identifiers */
263 				"\023DCA"	/* Direct Cache Access */
264 				"\024SSE4.1"	/* SSE 4.1 */
265 				"\025SSE4.2"	/* SSE 4.2 */
266 				"\026x2APIC"	/* xAPIC Extensions */
267 				"\027MOVBE"	/* MOVBE Instruction */
268 				"\030POPCNT"	/* POPCNT Instruction */
269 				"\031TSCDLT"	/* TSC-Deadline Timer */
270 				"\032AESNI"	/* AES Crypto */
271 				"\033XSAVE"	/* XSAVE/XRSTOR States */
272 				"\034OSXSAVE"	/* OS-Enabled State Management */
273 				"\035AVX"	/* Advanced Vector Extensions */
274 				"\036F16C"	/* Half-precision conversions */
275 				"\037RDRND"	/* RDRAND RNG function */
276 				"\040VMM"	/* Running on a hypervisor */
277 				, cpu_feature2);
278 			}
279 
280 			if ((cpu_feature2 & CPUID2_XSAVE) != 0)
281 				print_xsave_info();
282 
283 			if (cpu_ia32_arch_caps != 0) {
284 				kprintf("\n  IA32_ARCH_CAPS=0x%pb%i",
285 				       "\020"
286 				       "\001RDCL_NO"
287 				       "\002IBRS_ALL"
288 				       "\003RSBA"
289 				       "\004SKIP_L1DFL_VME"
290 				       "\005SSB_NO"
291 				       "\006MDS_NO"
292 				       "\007IF_PSCHANGE_MC_NO"
293 				       "\010TSX_CTRL"
294 				       "\011TAA_NO",
295 				       (u_int)cpu_ia32_arch_caps
296 				);
297 			}
298 
299 			/*
300 			 * AMD64 Architecture Programmer's Manual Volume 3:
301 			 * General-Purpose and System Instructions
302 			 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/24594.pdf
303 			 *
304 			 * IA-32 Intel Architecture Software Developer's Manual,
305 			 * Volume 2A: Instruction Set Reference, A-M
306 			 * ftp://download.intel.com/design/Pentium4/manuals/25366617.pdf
307 			 */
308 			if (amd_feature != 0) {
309 				kprintf("\n  AMD Features=0x%pb%i",
310 				"\020"		/* in hex */
311 				"\001<s0>"	/* Same */
312 				"\002<s1>"	/* Same */
313 				"\003<s2>"	/* Same */
314 				"\004<s3>"	/* Same */
315 				"\005<s4>"	/* Same */
316 				"\006<s5>"	/* Same */
317 				"\007<s6>"	/* Same */
318 				"\010<s7>"	/* Same */
319 				"\011<s8>"	/* Same */
320 				"\012<s9>"	/* Same */
321 				"\013<b10>"	/* Undefined */
322 				"\014SYSCALL"	/* Have SYSCALL/SYSRET */
323 				"\015<s12>"	/* Same */
324 				"\016<s13>"	/* Same */
325 				"\017<s14>"	/* Same */
326 				"\020<s15>"	/* Same */
327 				"\021<s16>"	/* Same */
328 				"\022<s17>"	/* Same */
329 				"\023<b18>"	/* Reserved, unknown */
330 				"\024MP"	/* Multiprocessor Capable */
331 				"\025NX"	/* Has EFER.NXE, NX */
332 				"\026<b21>"	/* Undefined */
333 				"\027MMX+"	/* AMD MMX Extensions */
334 				"\030<s23>"	/* Same */
335 				"\031<s24>"	/* Same */
336 				"\032FFXSR"	/* Fast FXSAVE/FXRSTOR */
337 				"\033Page1GB"	/* 1-GB large page support */
338 				"\034RDTSCP"	/* RDTSCP */
339 				"\035<b28>"	/* Undefined */
340 				"\036LM"	/* 64 bit long mode */
341 				"\0373DNow!+"	/* AMD 3DNow! Extensions */
342 				"\0403DNow!"	/* AMD 3DNow! */
343 				, amd_feature);
344 			}
345 
346 			if (amd_feature2 != 0) {
347 				kprintf("\n  AMD Features2=0x%pb%i",
348 				"\020"
349 				"\001LAHF"	/* LAHF/SAHF in long mode */
350 				"\002CMP"	/* CMP legacy */
351 				"\003SVM"	/* Secure Virtual Mode */
352 				"\004ExtAPIC"	/* Extended APIC register */
353 				"\005CR8"	/* CR8 in legacy mode */
354 				"\006ABM"	/* LZCNT instruction */
355 				"\007SSE4A"	/* SSE4A */
356 				"\010MAS"	/* Misaligned SSE mode */
357 				"\011Prefetch"	/* 3DNow! Prefetch/PrefetchW */
358 				"\012OSVW"	/* OS visible workaround */
359 				"\013IBS"	/* Instruction based sampling */
360 				"\014XOP"	/* XOP extended instructions */
361 				"\015SKINIT"	/* SKINIT/STGI */
362 				"\016WDT"	/* Watchdog timer */
363 				"\017<b14>"
364 				"\020LWP"	/* Lightweight Profiling */
365 				"\021FMA4"	/* 4-operand FMA instructions */
366 				"\022TCE"       /* Translation Cache Extension */
367 				"\023<b18>"
368 				"\024NodeId"	/* NodeId MSR support */
369 				"\025<b20>"
370 				"\026TBM"	/* Trailing Bit Manipulation */
371 				"\027Topology"	/* Topology Extensions */
372 				"\030PCX_CORE"  /* Core Performance Counter */
373 				"\031PCX_NB"    /* NB Performance Counter */
374 				"\032SPM"	/* Streaming Perf Monitor */
375 				"\033DBE"	/* Data Breakpoint Extension */
376 				"\034PTSC"	/* Performance TSC */
377 				"\035PCX_L2I"	/* L2I Performance Counter */
378 				"\036MWAITX"	/* MONITORX/MWAITX instructions */
379 				"\037ADMSKX"	/* Address Mask Extension */
380 				"\040<b31>"
381 				, amd_feature2);
382 			}
383 
384 			if (cpu_stdext_feature != 0) {
385 				kprintf("\n  Structured Extended "
386 					"Features=0x%pb%i",
387 				        "\020"
388 				        /* RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */
389 				        "\001GSFSBASE"
390 				        /* IA32_TSC_ADJUST MSR */
391 				        "\002TSCADJ"
392 				        /* Bit Manipulation Instructions */
393 				        "\004BMI1"
394 				        /* Hardware Lock Elision */
395 				        "\005HLE"
396 				        /* Advanced Vector Instructions 2 */
397 				        "\006AVX2"
398 				        /* FDP_EXCPTN_ONLY */
399 				        "\007FDPEXC"
400 				        /* Supervisor Mode Execution Prot. */
401 				        "\010SMEP"
402 				        /* Bit Manipulation Instructions 2 */
403 				        "\011BMI2"
404 				        /* Enhanced REP MOVSB/STOSB */
405 				        "\012ENHMOVSB"
406 				        /* Invalidate Processor Context ID */
407 				        "\013INVPCID"
408 				        /* Restricted Transactional Memory */
409 				        "\014RTM"
410 				        /* Platform QoS Monitoring */
411 				        "\015PQM"
412 				        /* Deprecate FPU CS/DS values */
413 				        "\016NFPUSG"
414 				        /* Intel Memory Protection Extensions */
415 				        "\017MPX"
416 				        /* Platform QoS Enforcement */
417 				        "\020PQE"
418 				        /* AVX512 Foundation */
419 				        "\021AVX512F"
420 				        /* AVX512 Double/Quadword */
421 				        "\022AVX512DQ"
422 				        /* Enhanced NRBG */
423 				        "\023RDSEED"
424 				        /* ADCX + ADOX */
425 				        "\024ADX"
426 				        /* Supervisor Mode Access Prevention */
427 				        "\025SMAP"
428 				        /* AVX512 Integer Fused Multiply Add */
429 				        "\026AVX512IFMA"
430 				        /* Formerly PCOMMIT */
431 				        "\027<b22>"
432 				        /* Cache Line FLUSH OPTimized */
433 				        "\030CLFLUSHOPT"
434 				        /* Cache Line Write Back */
435 				        "\031CLWB"
436 				        /* Processor Trace */
437 				        "\032PROCTRACE"
438 				        /* AVX512 Prefetch */
439 				        "\033AVX512PF"
440 				        /* AVX512 Exponential and Reciprocal */
441 				        "\034AVX512ER"
442 				        /* AVX512 Conflict Detection */
443 				        "\035AVX512CD"
444 				        /* SHA Extension */
445 				        "\036SHA"
446 				        /* AVX512 Byte and Word */
447 				        "\037AVX512BW"
448 				        /* AVX512 Vector Length */
449 				        "\040AVX512VL",
450 				        cpu_stdext_feature
451 				);
452 			}
453 
454 			if (cpu_stdext_feature2 != 0) {
455 				kprintf("\n  Structured Extended "
456 					"Features2=0x%pb%i",
457 				        "\020"
458 				        "\001PREFETCHWT1"
459 				        "\002AVX512VBMI"
460 				        "\003UMIP"
461 				        "\004PKU"
462 				        "\005OSPKE"
463 				        "\006WAITPKG"
464 				        "\007AVX512VBMI2"
465 				        "\010CET_SS"
466 				        "\011GFNI"
467 				        "\012VAES"
468 				        "\013VPCLMULQDQ"
469 				        "\014AVX512VNNI"
470 				        "\015AVX512BITALG"
471 				        "\016TME"
472 				        "\017AVX512VPOPCNTDQ"
473 				        "\021LA57"
474 				        "\027RDPID"
475 				        "\030KL"
476 				        "\031BUS_LOCK_DETECT"
477 				        "\032CLDEMOTE"
478 				        "\034MOVDIRI"
479 				        "\035MOVDIR64B"
480 				        "\036ENQCMD"
481 				        "\037SGXLC"
482 				        "\040PKS",
483 				        cpu_stdext_feature2
484 			       );
485 			}
486 
487 			if (cpu_stdext_feature3 != 0) {
488 				kprintf("\n  Structured Extended "
489 					"Features3=0x%pb%i",
490 				        "\020"
491 				        "\003AVX512_4VNNIW"
492 				        "\004AVX512_4FMAPS"
493 				        "\005FSRM"
494 				        "\006UINTR"
495 				        "\011AVX512VP2INTERSECT"
496 				        "\012MCUOPT"
497 				        "\013MD_CLEAR"
498 				        "\016TSXFA"
499 				        "\017SERIALIZE"
500 				        "\020HYBRID"
501 				        "\021TSXLDTRK"
502 				        "\023PCONFIG"
503 				        "\025IBT"
504 				        "\033IBPB"
505 				        "\034STIBP"
506 				        "\035L1DFL"
507 				        "\036ARCH_CAP"
508 				        "\037CORE_CAP"
509 				        "\040SSBD",
510 				        cpu_stdext_feature3
511 			       );
512 			}
513 
514 			if (cpu_thermal_feature != 0) {
515 				kprintf("\n  Thermal and PM Features=0x%pb%i",
516 				    "\020"
517 				    /* Digital temperature sensor */
518 				    "\001SENSOR"
519 				    /* Turbo boost */
520 				    "\002TURBO"
521 				    /* APIC-Timer-always-running */
522 				    "\003ARAT"
523 				    /* Power limit notification controls */
524 				    "\005PLN"
525 				    /* Clock modulation duty cycle extension */
526 				    "\006ECMD"
527 				    /* Package thermal management */
528 				    "\007PTM"
529 				    /* Hardware P-states */
530 				    "\010HWP"
531 				    , cpu_thermal_feature);
532 			}
533 
534 			if (cpu_mwait_feature != 0) {
535 				kprintf("\n  MONITOR/MWAIT Features=0x%pb%i",
536 				    "\020"
537 				    /* Enumeration of Monitor-Mwait extension */
538 				    "\001CST"
539 			    /* Interrupts as break-event for MWAIT */
540 				    "\002INTBRK"
541 				    , cpu_mwait_feature);
542 			}
543 
544 			if (cpu_vendor_id == CPU_VENDOR_CENTAUR)
545 				print_via_padlock_info();
546 
547 			/*
548 			 * The (invalid) CPU topology printing that used to
549 			 * live here is deprecated; topology detection moved to
550 			 * - sys/platform/pc64/x86_64/mp_machdep.c
551 			 * - sys/kern/subr_cpu_topology
552 			 */
553 
554 #if 0
555 			if ((cpu_feature & CPUID_HTT) &&
556 			    cpu_vendor_id == CPU_VENDOR_AMD)
557 				cpu_feature &= ~CPUID_HTT;
558 #endif
559 
560 			/*
561 			 * If this CPU supports HTT or CMP then mention the
562 			 * number of physical/logical cores it contains.
563 			 */
564 #if 0
565 			if (cpu_feature & CPUID_HTT)
566 				htt = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
567 			if (cpu_vendor_id == CPU_VENDOR_AMD &&
568 			    (amd_feature2 & AMDID2_CMP))
569 				cmp = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
570 			else if (cpu_vendor_id == CPU_VENDOR_INTEL &&
571 			    (cpu_high >= 4)) {
572 				cpuid_count(4, 0, regs);
573 				if ((regs[0] & 0x1f) != 0)
574 					cmp = ((regs[0] >> 26) & 0x3f) + 1;
575 			}
576 #endif
577 #ifdef foo
578 			/*
579 			 * XXX For Intel CPUs, this is max number of cores per
580 			 * package, not the actual cores per package.
581 			 */
582 #if 0
583 			cpu_cores = cmp;
584 			cpu_logical = htt / cmp;
585 
586 			if (cpu_cores > 1)
587 				kprintf("\n  Cores per package: %d", cpu_cores);
588 			if (cpu_logical > 1) {
589 				kprintf("\n  Logical CPUs per core: %d",
590 				    cpu_logical);
591 			}
592 #endif
593 #endif
594 		}
595 	}
596 	/* Avoid ugly blank lines: only print newline when we have to. */
597 	if (*cpu_vendor || cpu_id)
598 		kprintf("\n");
599 
600 	if (cpu_stdext_feature & (CPUID_STDEXT_SMAP | CPUID_STDEXT_SMEP)) {
601 		kprintf("CPU Special Features Installed:");
602 		if (cpu_stdext_feature & CPUID_STDEXT_SMAP)
603 			kprintf(" SMAP");
604 		if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
605 			kprintf(" SMEP");
606 		kprintf("\n");
607 	}
608 
609 	if (bootverbose) {
610 		if (cpu_vendor_id == CPU_VENDOR_AMD)
611 			print_AMD_info();
612 	}
613 }
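/*
 * The "%pb%i" conversions above take a bit-name string and a value: the
 * first character of the string is the numeric base used to print the raw
 * value (\020 == 16), and each following entry is a 1-origin bit number
 * followed by that bit's name.  The sketch below is an illustrative,
 * non-kernel decoder for that encoding; it is not part of this file's build
 * and only documents how the "\020\001FPU..." strings are interpreted.
 */
#if 0
static void
example_decode_bitnames(u_int value, const char *bits)
{
	int bit, first = 1;

	kprintf("0x%x", value);		/* leading \020 selects base 16 */
	bits++;				/* skip the base character */
	while ((bit = (u_char)*bits++) != '\0') {
		const char *name = bits;

		while (*bits > ' ')	/* name ends at the next control byte */
			bits++;
		if (value & (1U << (bit - 1))) {
			kprintf("%c%.*s", first ? '<' : ',',
			    (int)(bits - name), name);
			first = 0;
		}
	}
	if (!first)
		kprintf(">");
}
#endif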
614 
615 void
616 panicifcpuunsupported(void)
617 {
618 
619 #ifndef HAMMER_CPU
620 #error "You need to specify a cpu type"
621 #endif
622 	/*
623 	 * Now that we have told the user what they have,
624 	 * let them know if that machine type isn't configured.
625 	 */
626 	switch (cpu_class) {
627 	case CPUCLASS_X86:
628 #ifndef HAMMER_CPU
629 	case CPUCLASS_K8:
630 #endif
631 		panic("CPU class not configured");
632 	default:
633 		break;
634 	}
635 }
636 
637 
638 #if 0 /* JG */
639 /* Update TSC freq with the value indicated by the caller. */
640 static void
641 tsc_freq_changed(void *arg, const struct cf_level *level, int status)
642 {
643 	/* If there was an error during the transition, don't do anything. */
644 	if (status != 0)
645 		return;
646 
647 	/* Total setting for this level gives the new frequency in MHz. */
648 	hw_clockrate = level->total_set.freq;
649 }
650 
651 EVENTHANDLER_DEFINE(cpufreq_post_change, tsc_freq_changed, NULL,
652     EVENTHANDLER_PRI_ANY);
653 #endif
654 
655 /*
656  * Final stage of CPU identification.
657  */
658 void
659 identify_cpu(void)
660 {
661 	u_int regs[4];
662 	u_int cpu_stdext_disable;
663 
664 	do_cpuid(0, regs);
665 	cpu_high = regs[0];
666 	((u_int *)&cpu_vendor)[0] = regs[1];
667 	((u_int *)&cpu_vendor)[1] = regs[3];
668 	((u_int *)&cpu_vendor)[2] = regs[2];
669 	cpu_vendor[12] = '\0';
670 	cpu_vendor_id = find_cpu_vendor_id();
671 
672 	do_cpuid(1, regs);
673 	cpu_id = regs[0];
674 	cpu_procinfo = regs[1];
675 	cpu_feature = regs[3];
676 	cpu_feature2 = regs[2];
677 
678 	if (cpu_high >= 5) {
679 		do_cpuid(5, regs);
680 		cpu_mwait_feature = regs[2];
681 		if (cpu_mwait_feature & CPUID_MWAIT_EXT) {
682 			cpu_mwait_extemu = regs[3];
683 			/* At least one C1 */
684 			if (CPUID_MWAIT_CX_SUBCNT(cpu_mwait_extemu, 1) == 0) {
685 				/* No C1 at all, no MWAIT EXT then */
686 				cpu_mwait_feature &= ~CPUID_MWAIT_EXT;
687 				cpu_mwait_extemu = 0;
688 			}
689 		}
690 	}
691 	if (cpu_high >= 6) {
692 		do_cpuid(6, regs);
693 		cpu_thermal_feature = regs[0];
694 	}
695 	if (cpu_high >= 7) {
696 		cpuid_count(7, 0, regs);
697 		cpu_stdext_feature = regs[1];
698 
699 		/*
700 		 * Some hypervisors fail to filter out unsupported
701 		 * extended features.  For now, disable the
702 		 * extensions, activation of which requires setting a
703 		 * bit in CR4, and which VM monitors do not support.
704 		 */
705 		if (cpu_feature2 & CPUID2_VMM) {
706 			cpu_stdext_disable = CPUID_STDEXT_FSGSBASE |
707 					     CPUID_STDEXT_SMEP;
708 		} else {
709 			cpu_stdext_disable = 0;
710 		}
711 		TUNABLE_INT_FETCH("hw.cpu_stdext_disable", &cpu_stdext_disable);
712 
713 		/*
714 		 * Some hypervisors fail to implement
715 		 * MSR_IA32_ARCH_CAPABILITIES, so catch any problems.
716 		 */
717 		cpu_stdext_feature &= ~cpu_stdext_disable;
718 		cpu_stdext_feature2 = regs[2];
719 		cpu_stdext_feature3 = regs[3];
720 		if (cpu_stdext_feature3 & CPUID_STDEXT3_ARCH_CAP) {
721 			if (rdmsr_safe(MSR_IA32_ARCH_CAPABILITIES,
722 				       &cpu_ia32_arch_caps))
723 			{
724 				kprintf("Warning: MSR_IA32_ARCH_CAPABILITIES "
725 					"cannot be accessed\n");
726 			}
727 		}
728 	}
729 
730 	if (cpu_vendor_id == CPU_VENDOR_INTEL ||
731 	    cpu_vendor_id == CPU_VENDOR_AMD ||
732 	    cpu_vendor_id == CPU_VENDOR_CENTAUR) {
733 		do_cpuid(0x80000000, regs);
734 		cpu_exthigh = regs[0];
735 	}
736 	if (cpu_exthigh >= 0x80000001) {
737 		do_cpuid(0x80000001, regs);
738 		amd_feature = regs[3] & ~(cpu_feature & 0x0183f3ff);
739 		amd_feature2 = regs[2];
740 	}
741 	if (cpu_exthigh >= 0x80000008) {
742 		do_cpuid(0x80000008, regs);
743 		cpu_procinfo2 = regs[2];
744 	}
745 
746 	/* XXX */
747 	cpu_type = CPU_CLAWHAMMER;
748 
749 	if (cpu_feature & CPUID_SSE2)
750 		cpu_mi_feature |= CPU_MI_BZERONT;
751 
752 	if (cpu_feature2 & CPUID2_MON)
753 		cpu_mi_feature |= CPU_MI_MONITOR;
754 
755 	/*
756 	 * We assume that all CPUs have the same
757 	 * SSE/FXSR features.
758 	 */
759 	if ((cpu_feature & CPUID_SSE) && (cpu_feature & CPUID_FXSR))
760 		npxprobemask();
761 }
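/*
 * Two asides on the identify_cpu() decoding above, written as a sketch that
 * is not part of the build.  First, CPUID leaf 0 returns the vendor string
 * in register order EBX, EDX, ECX, which is why regs[1]/regs[3]/regs[2] are
 * stored in that order: on Intel parts the three registers spell
 * "Genu" "ineI" "ntel".  Second, the family/model values printed via
 * CPUID_TO_FAMILY()/CPUID_TO_MODEL() come from cpu_id (leaf 1, EAX) roughly
 * as below; the exact extended-model rule differs slightly between vendors,
 * so treat this as an approximation of the real macros in
 * <machine/specialreg.h>.
 */
#if 0
static u_int
example_cpuid_family(u_int id)
{
	u_int fam = (id >> 8) & 0xf;		/* base family, bits 11:8 */

	if (fam == 0xf)
		fam += (id >> 20) & 0xff;	/* extended family, bits 27:20 */
	return (fam);
}

static u_int
example_cpuid_model(u_int id)
{
	u_int fam = (id >> 8) & 0xf;
	u_int model = (id >> 4) & 0xf;		/* base model, bits 7:4 */

	if (fam == 0x6 || fam == 0xf)
		model |= ((id >> 16) & 0xf) << 4; /* ext. model, bits 19:16 */
	return (model);
}
#endif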
762 
763 static u_int
764 find_cpu_vendor_id(void)
765 {
766 	int	i;
767 
768 	for (i = 0; i < NELEM(cpu_vendors); i++)
769 		if (strcmp(cpu_vendor, cpu_vendors[i].vendor) == 0)
770 			return (cpu_vendors[i].vendor_id);
771 	return (0);
772 }
773 
774 static void
775 print_AMD_assoc(int i)
776 {
777 	if (i == 255)
778 		kprintf(", fully associative\n");
779 	else
780 		kprintf(", %d-way associative\n", i);
781 }
782 
783 static void
784 print_AMD_l2_assoc(int i)
785 {
786 	switch (i & 0x0f) {
787 	case 0: kprintf(", disabled/not present\n"); break;
788 	case 1: kprintf(", direct mapped\n"); break;
789 	case 2: kprintf(", 2-way associative\n"); break;
790 	case 4: kprintf(", 4-way associative\n"); break;
791 	case 6: kprintf(", 8-way associative\n"); break;
792 	case 8: kprintf(", 16-way associative\n"); break;
793 	case 15: kprintf(", fully associative\n"); break;
794 	default: kprintf(", reserved configuration\n"); break;
795 	}
796 }
797 
798 static void
799 print_AMD_info(void)
800 {
801 	u_int regs[4];
802 
803 	if (cpu_exthigh < 0x80000005)
804 		return;
805 
806 	do_cpuid(0x80000005, regs);
807 	kprintf("L1 2MB data TLB: %d entries", (regs[0] >> 16) & 0xff);
808 	print_AMD_assoc(regs[0] >> 24);
809 
810 	kprintf("L1 2MB instruction TLB: %d entries", regs[0] & 0xff);
811 	print_AMD_assoc((regs[0] >> 8) & 0xff);
812 
813 	kprintf("L1 4KB data TLB: %d entries", (regs[1] >> 16) & 0xff);
814 	print_AMD_assoc(regs[1] >> 24);
815 
816 	kprintf("L1 4KB instruction TLB: %d entries", regs[1] & 0xff);
817 	print_AMD_assoc((regs[1] >> 8) & 0xff);
818 
819 	kprintf("L1 data cache: %d kbytes", regs[2] >> 24);
820 	kprintf(", %d bytes/line", regs[2] & 0xff);
821 	kprintf(", %d lines/tag", (regs[2] >> 8) & 0xff);
822 	print_AMD_assoc((regs[2] >> 16) & 0xff);
823 
824 	kprintf("L1 instruction cache: %d kbytes", regs[3] >> 24);
825 	kprintf(", %d bytes/line", regs[3] & 0xff);
826 	kprintf(", %d lines/tag", (regs[3] >> 8) & 0xff);
827 	print_AMD_assoc((regs[3] >> 16) & 0xff);
828 
829 	if (cpu_exthigh >= 0x80000006) {
830 		do_cpuid(0x80000006, regs);
831 		if ((regs[0] >> 16) != 0) {
832 			kprintf("L2 2MB data TLB: %d entries",
833 			    (regs[0] >> 16) & 0xfff);
834 			print_AMD_l2_assoc(regs[0] >> 28);
835 			kprintf("L2 2MB instruction TLB: %d entries",
836 			    regs[0] & 0xfff);
837 			print_AMD_l2_assoc((regs[0] >> 12) & 0xf);
838 		} else {
839 			kprintf("L2 2MB unified TLB: %d entries",
840 			    regs[0] & 0xfff);
841 			print_AMD_l2_assoc((regs[0] >> 12) & 0xf);
842 		}
843 		if ((regs[1] >> 16) != 0) {
844 			kprintf("L2 4KB data TLB: %d entries",
845 			    (regs[1] >> 16) & 0xfff);
846 			print_AMD_l2_assoc(regs[1] >> 28);
847 
848 			kprintf("L2 4KB instruction TLB: %d entries",
849 			    regs[1] & 0xfff);
850 			print_AMD_l2_assoc((regs[1] >> 12) & 0xf);
851 		} else {
852 			kprintf("L2 4KB unified TLB: %d entries",
853 			    regs[1] & 0xfff);
854 			print_AMD_l2_assoc((regs[1] >> 12) & 0xf);
855 		}
856 		kprintf("L2 unified cache: %d kbytes", regs[2] >> 16);
857 		kprintf(", %d bytes/line", regs[2] & 0xff);
858 		kprintf(", %d lines/tag", (regs[2] >> 8) & 0x0f);
859 		print_AMD_l2_assoc((regs[2] >> 12) & 0x0f);
860 	}
861 }
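/*
 * Field layout assumed by print_AMD_info() for the L1 data cache descriptor
 * (CPUID 0x80000005, ECX; EDX uses the same layout for the instruction
 * cache).  The helper below is an illustrative sketch, not part of the
 * build.
 */
#if 0
static void
example_print_AMD_l1d(void)
{
	u_int regs[4];
	u_int size_kb, assoc, lines_per_tag, line_size;

	do_cpuid(0x80000005, regs);
	size_kb       = (regs[2] >> 24) & 0xff;	/* ECX[31:24]: size in KB */
	assoc         = (regs[2] >> 16) & 0xff;	/* ECX[23:16]: associativity */
	lines_per_tag = (regs[2] >>  8) & 0xff;	/* ECX[15:8]:  lines per tag */
	line_size     =  regs[2]        & 0xff;	/* ECX[7:0]:   line size, bytes */
	kprintf("L1 data cache: %uK, assoc %u, %u lines/tag, %u-byte lines\n",
	    size_kb, assoc, lines_per_tag, line_size);
}
#endif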
862 
863 static void
864 print_via_padlock_info(void)
865 {
866 	u_int regs[4];
867 
868 	/* Check for supported models. */
869 	switch (cpu_id & 0xff0) {
870 	case 0x690:
871 		if ((cpu_id & 0xf) < 3)
872 			return;
873 	case 0x6a0:
874 	case 0x6d0:
875 	case 0x6f0:
876 		break;
877 	default:
878 		return;
879 	}
880 
881 	do_cpuid(0xc0000000, regs);
882 	if (regs[0] >= 0xc0000001)
883 		do_cpuid(0xc0000001, regs);
884 	else
885 		return;
886 
887 	kprintf("\n  VIA Padlock Features=0x%pb%i",
888 	"\020"
889 	"\003RNG"		/* RNG */
890 	"\007AES"		/* ACE */
891 	"\011AES-CTR"		/* ACE2 */
892 	"\013SHA1,SHA256"	/* PHE */
893 	"\015RSA"		/* PMM */
894 	, regs[3]);
895 }
896 
897 static void
898 print_xsave_info(void)
899 {
900 	u_int regs[4];
901 
902 	cpuid_count(0xd, 0x1, regs);
903 	if (regs[0] == 0)
904 		return;
905 
906 	kprintf("\n  XSAVE Features=0x%pb%i",
907 		"\020"
908 		"\001XSAVEOPT"
909 		"\002XSAVEC"
910 		"\003XGETBV"
911 		"\004XSAVES"
912 		, regs[0]);
913 }
914 
915