xref: /freebsd/sys/arm64/arm64/identcpu.c (revision 076ad2f8)
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * Copyright (c) 2014 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Semihalf
7  * under sponsorship of the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/pcpu.h>
38 #include <sys/smp.h>
39 #include <sys/sysctl.h>
40 #include <sys/systm.h>
41 
42 #include <machine/atomic.h>
43 #include <machine/cpu.h>
44 #include <machine/cpufunc.h>
45 
/* Spinlock serializing identify_cpu()'s cross-CPU comparisons (0 = free). */
static int ident_lock;

/* Machine class string exported as hw.machine. */
char machine[] = "arm64";

SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,
    "Machine class");
52 
/*
 * Per-CPU affinity as provided in MPIDR_EL1
 * Indexed by CPU number in logical order selected by the system.
 * Relevant fields can be extracted using CPU_AFFn macros,
 * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system.
 *
 * Fields used by us:
 * Aff1 - Cluster number
 * Aff0 - CPU number in Aff1 cluster
 */
uint64_t __cpu_affinity[MAXCPU];
/* Number of affinity levels that differ across CPUs; grown in identify_cpu(). */
static u_int cpu_aff_levels;
65 
/*
 * Cached identification state for one CPU: decoded MIDR fields plus a
 * snapshot of the AArch64 ID registers read on that CPU.
 */
struct cpu_desc {
	u_int		cpu_impl;	/* MIDR implementer code */
	u_int		cpu_part_num;	/* MIDR part number */
	u_int		cpu_variant;	/* MIDR variant ("rN" in rNpM) */
	u_int		cpu_revision;	/* MIDR revision ("pM" in rNpM) */
	const char	*cpu_impl_name;
	const char	*cpu_part_name;

	uint64_t	mpidr;
	uint64_t	id_aa64afr0;
	uint64_t	id_aa64afr1;
	uint64_t	id_aa64dfr0;
	uint64_t	id_aa64dfr1;
	uint64_t	id_aa64isar0;
	uint64_t	id_aa64isar1;
	uint64_t	id_aa64mmfr0;
	uint64_t	id_aa64mmfr1;
	uint64_t	id_aa64pfr0;
	uint64_t	id_aa64pfr1;
};

struct cpu_desc cpu_desc[MAXCPU];

/*
 * Bitmask of ID registers that differ from CPU 0; print_cpu_features()
 * only reprints a register on secondary CPUs when its bit is set here.
 */
static u_int cpu_print_regs;
#define	PRINT_ID_AA64_AFR0	0x00000001
#define	PRINT_ID_AA64_AFR1	0x00000002
#define	PRINT_ID_AA64_DFR0	0x00000004
#define	PRINT_ID_AA64_DFR1	0x00000008
#define	PRINT_ID_AA64_ISAR0	0x00000010
#define	PRINT_ID_AA64_ISAR1	0x00000020
#define	PRINT_ID_AA64_MMFR0	0x00000040
#define	PRINT_ID_AA64_MMFR1	0x00000080
#define	PRINT_ID_AA64_PFR0	0x00000100
#define	PRINT_ID_AA64_PFR1	0x00000200
99 
/* (PartNum, name) pair; tables of these are terminated by CPU_PART_NONE. */
struct cpu_parts {
	u_int		part_id;
	const char	*part_name;
};
/* Sentinel: part_id 0 matches any part, ending the table walk. */
#define	CPU_PART_NONE	{ 0, "Unknown Processor" }

struct cpu_implementers {
	u_int			impl_id;
	const char		*impl_name;
	/*
	 * Part number is implementation defined
	 * so each vendor will have its own set of values and names.
	 */
	const struct cpu_parts	*cpu_parts;
};
/* Sentinel: impl_id 0 matches any implementer, ending the table walk. */
#define	CPU_IMPLEMENTER_NONE	{ 0, "Unknown Implementer", cpu_parts_none }
116 
/*
 * Per-implementer table of (PartNum, CPU Name) pairs.
 */
/* ARM Ltd. */
static const struct cpu_parts cpu_parts_arm[] = {
	{ CPU_PART_FOUNDATION, "Foundation-Model" },
	{ CPU_PART_CORTEX_A53, "Cortex-A53" },
	{ CPU_PART_CORTEX_A57, "Cortex-A57" },
	CPU_PART_NONE,
};
/* Cavium */
static const struct cpu_parts cpu_parts_cavium[] = {
	{ CPU_PART_THUNDER, "Thunder" },
	CPU_PART_NONE,
};

/* Unknown */
static const struct cpu_parts cpu_parts_none[] = {
	CPU_PART_NONE,
};

/*
 * Implementers table.  Terminated by CPU_IMPLEMENTER_NONE (impl_id 0),
 * which matches any unrecognized implementer.
 */
const struct cpu_implementers cpu_implementers[] = {
	{ CPU_IMPL_ARM,		"ARM",		cpu_parts_arm },
	{ CPU_IMPL_BROADCOM,	"Broadcom",	cpu_parts_none },
	{ CPU_IMPL_CAVIUM,	"Cavium",	cpu_parts_cavium },
	{ CPU_IMPL_DEC,		"DEC",		cpu_parts_none },
	{ CPU_IMPL_INFINEON,	"IFX",		cpu_parts_none },
	{ CPU_IMPL_FREESCALE,	"Freescale",	cpu_parts_none },
	{ CPU_IMPL_NVIDIA,	"NVIDIA",	cpu_parts_none },
	{ CPU_IMPL_APM,		"APM",		cpu_parts_none },
	{ CPU_IMPL_QUALCOMM,	"Qualcomm",	cpu_parts_none },
	{ CPU_IMPL_MARVELL,	"Marvell",	cpu_parts_none },
	{ CPU_IMPL_INTEL,	"Intel",	cpu_parts_none },
	CPU_IMPLEMENTER_NONE,
};
155 
156 static void
157 identify_cpu_sysinit(void *dummy __unused)
158 {
159 	int cpu;
160 
161 	CPU_FOREACH(cpu) {
162 		print_cpu_features(cpu);
163 	}
164 }
165 SYSINIT(idenrity_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
166 
/*
 * Pretty-print the identification data gathered by identify_cpu() for one
 * CPU: implementer/part/revision, MPIDR affinity, and a decoded dump of the
 * AArch64 ID registers.  For CPU 0 every register is printed; for secondary
 * CPUs a register is reprinted only if identify_cpu() found it to differ
 * from CPU 0 (tracked via the PRINT_ID_AA64_* bits in cpu_print_regs).
 */
void
print_cpu_features(u_int cpu)
{
	int printed;

	printf("CPU%3d: %s %s r%dp%d", cpu, cpu_desc[cpu].cpu_impl_name,
	    cpu_desc[cpu].cpu_part_name, cpu_desc[cpu].cpu_variant,
	    cpu_desc[cpu].cpu_revision);

	/* Print only the affinity levels that actually vary in this system. */
	printf(" affinity:");
	switch(cpu_aff_levels) {
	default:
	case 4:
		printf(" %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 3:
		printf(" %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 2:
		printf(" %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 1:
	case 0: /* On UP this will be zero */
		printf(" %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
		break;
	}
	printf("\n");

	/*
	 * There is a hardware errata where, if one CPU is performing a TLB
	 * invalidation while another is performing a store-exclusive the
	 * store-exclusive may return the wrong status. A workaround seems
	 * to be to use an IPI to invalidate on each CPU, however given the
	 * limited number of affected units (pass 1.1 is the evaluation
	 * hardware revision), and the lack of information from Cavium
	 * this has not been implemented.
	 *
	 * At the time of writing this the only information is from:
	 * https://lkml.org/lkml/2016/8/4/722
	 */
	/*
	 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1 on its own also
	 * triggers on pass 2.0+.
	 */
	if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
	    CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1)
		printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
		    "hardware bugs that may cause the incorrect operation of "
		    "atomic operations.\n");

	/* Secondary CPUs only report registers that differ from CPU 0. */
	if (cpu != 0 && cpu_print_regs == 0)
		return;

/* Comma separator for all entries after the first one in a <...> list. */
#define SEP_STR	((printed++) == 0) ? "" : ","

	/* AArch64 Instruction Set Attribute Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
		printed = 0;
		printf(" Instruction Set Attributes 0 = <");

		switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_RDM_NONE:
			break;
		case ID_AA64ISAR0_RDM_IMPL:
			printf("%sRDM", SEP_STR);
			break;
		default:
			printf("%sUnknown RDM", SEP_STR);
		}

		switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_ATOMIC_NONE:
			break;
		case ID_AA64ISAR0_ATOMIC_IMPL:
			printf("%sAtomic", SEP_STR);
			break;
		default:
			printf("%sUnknown Atomic", SEP_STR);
		}

		switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_AES_NONE:
			break;
		case ID_AA64ISAR0_AES_BASE:
			printf("%sAES", SEP_STR);
			break;
		case ID_AA64ISAR0_AES_PMULL:
			printf("%sAES+PMULL", SEP_STR);
			break;
		default:
			printf("%sUnknown AES", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA1_NONE:
			break;
		case ID_AA64ISAR0_SHA1_BASE:
			printf("%sSHA1", SEP_STR);
			break;
		default:
			printf("%sUnknown SHA1", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA2_NONE:
			break;
		case ID_AA64ISAR0_SHA2_BASE:
			printf("%sSHA2", SEP_STR);
			break;
		default:
			printf("%sUnknown SHA2", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_CRC32_NONE:
			break;
		case ID_AA64ISAR0_CRC32_BASE:
			printf("%sCRC32", SEP_STR);
			break;
		default:
			printf("%sUnknown CRC32", SEP_STR);
			break;
		}

		/* Any bits outside the decoded fields are dumped raw. */
		if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);

		printf(">\n");
	}

	/* AArch64 Instruction Set Attribute Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
		printf(" Instruction Set Attributes 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64isar1);
	}

	/* AArch64 Processor Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
		printed = 0;
		printf("         Processor Features 0 = <");
		switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_GIC_CPUIF_NONE:
			break;
		case ID_AA64PFR0_GIC_CPUIF_EN:
			printf("%sGIC", SEP_STR);
			break;
		default:
			printf("%sUnknown GIC interface", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_ADV_SIMD_NONE:
			break;
		case ID_AA64PFR0_ADV_SIMD_IMPL:
			printf("%sAdvSIMD", SEP_STR);
			break;
		default:
			printf("%sUnknown AdvSIMD", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_FP_NONE:
			break;
		case ID_AA64PFR0_FP_IMPL:
			printf("%sFloat", SEP_STR);
			break;
		default:
			printf("%sUnknown Float", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL3_NONE:
			printf("%sNo EL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64:
			printf("%sEL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64_32:
			printf("%sEL3 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL3", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL2_NONE:
			printf("%sNo EL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64:
			printf("%sEL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64_32:
			printf("%sEL2 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL2", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL1_64:
			printf("%sEL1", SEP_STR);
			break;
		case ID_AA64PFR0_EL1_64_32:
			printf("%sEL1 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL1", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL0_64:
			printf("%sEL0", SEP_STR);
			break;
		case ID_AA64PFR0_EL0_64_32:
			printf("%sEL0 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL0", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);

		printf(">\n");
	}

	/* AArch64 Processor Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
		printf("         Processor Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64pfr1);
	}

	/* AArch64 Memory Model Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
		printed = 0;
		printf("      Memory Model Features 0 = <");
		switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN4_NONE:
			break;
		case ID_AA64MMFR0_TGRAN4_IMPL:
			printf("%s4k Granule", SEP_STR);
			break;
		default:
			printf("%sUnknown 4k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN16_NONE:
			break;
		case ID_AA64MMFR0_TGRAN16_IMPL:
			printf("%s16k Granule", SEP_STR);
			break;
		default:
			printf("%sUnknown 16k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN64_NONE:
			break;
		case ID_AA64MMFR0_TGRAN64_IMPL:
			printf("%s64k Granule", SEP_STR);
			break;
		default:
			printf("%sUnknown 64k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_FIXED:
			break;
		case ID_AA64MMFR0_BIGEND_MIXED:
			printf("%sMixedEndian", SEP_STR);
			break;
		default:
			printf("%sUnknown Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_EL0_FIXED:
			break;
		case ID_AA64MMFR0_BIGEND_EL0_MIXED:
			printf("%sEL0 MixEndian", SEP_STR);
			break;
		default:
			printf("%sUnknown EL0 Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_S_NS_MEM_NONE:
			break;
		case ID_AA64MMFR0_S_NS_MEM_DISTINCT:
			printf("%sS/NS Mem", SEP_STR);
			break;
		default:
			printf("%sUnknown S/NS Mem", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_ASID_BITS_8:
			printf("%s8bit ASID", SEP_STR);
			break;
		case ID_AA64MMFR0_ASID_BITS_16:
			printf("%s16bit ASID", SEP_STR);
			break;
		default:
			printf("%sUnknown ASID", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_PA_RANGE_4G:
			printf("%s4GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_64G:
			printf("%s64GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_1T:
			printf("%s1TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_4T:
			printf("%s4TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_16T:
			printf("%s16TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_256T:
			printf("%s256TB PA", SEP_STR);
			break;
		default:
			printf("%sUnknown PA Range", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);
		printf(">\n");
	}

	/* AArch64 Memory Model Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
		printed = 0;
		printf("      Memory Model Features 1 = <");

		switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_PAN_NONE:
			break;
		case ID_AA64MMFR1_PAN_IMPL:
			printf("%sPAN", SEP_STR);
			break;
		default:
			printf("%sUnknown PAN", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_LO_NONE:
			break;
		case ID_AA64MMFR1_LO_IMPL:
			printf("%sLO", SEP_STR);
			break;
		default:
			printf("%sUnknown LO", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HPDS_NONE:
			break;
		case ID_AA64MMFR1_HPDS_IMPL:
			printf("%sHPDS", SEP_STR);
			break;
		default:
			printf("%sUnknown HPDS", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VH_NONE:
			break;
		case ID_AA64MMFR1_VH_IMPL:
			printf("%sVHE", SEP_STR);
			break;
		default:
			printf("%sUnknown VHE", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VMIDBITS_8:
			break;
		case ID_AA64MMFR1_VMIDBITS_16:
			printf("%s16 VMID bits", SEP_STR);
			break;
		default:
			printf("%sUnknown VMID bits", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HAFDBS_NONE:
			break;
		case ID_AA64MMFR1_HAFDBS_AF:
			printf("%sAF", SEP_STR);
			break;
		case ID_AA64MMFR1_HAFDBS_AF_DBS:
			printf("%sAF+DBS", SEP_STR);
			break;
		default:
			printf("%sUnknown Hardware update AF/DBS", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);
		printf(">\n");
	}

	/* AArch64 Debug Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
		printed = 0;
		printf("             Debug Features 0 = <");
		printf("%s%lu CTX Breakpoints", SEP_STR,
		    ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0));

		printf("%s%lu Watchpoints", SEP_STR,
		    ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0));

		printf("%s%lu Breakpoints", SEP_STR,
		    ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0));

		switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMU_VER_NONE:
			break;
		case ID_AA64DFR0_PMU_VER_3:
			printf("%sPMUv3", SEP_STR);
			break;
		case ID_AA64DFR0_PMU_VER_3_1:
			printf("%sPMUv3+16 bit evtCount", SEP_STR);
			break;
		case ID_AA64DFR0_PMU_VER_IMPL:
			printf("%sImplementation defined PMU", SEP_STR);
			break;
		default:
			printf("%sUnknown PMU", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_TRACE_VER_NONE:
			break;
		case ID_AA64DFR0_TRACE_VER_IMPL:
			printf("%sTrace", SEP_STR);
			break;
		default:
			printf("%sUnknown Trace", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_DEBUG_VER_8:
			printf("%sDebug v8", SEP_STR);
			break;
		case ID_AA64DFR0_DEBUG_VER_8_VHE:
			printf("%sDebug v8+VHE", SEP_STR);
			break;
		default:
			printf("%sUnknown Debug", SEP_STR);
			break;
		}

		if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);
		printf(">\n");
	}

	/* AArch64 Debug Feature Register 1 (comment previously mislabeled MMFR1) */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
		printf("             Debug Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64dfr1);
	}

	/* AArch64 Auxiliary Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
		printf("         Auxiliary Features 0 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr0);
	}

	/* AArch64 Auxiliary Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
		printf("         Auxiliary Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr1);
	}

#undef SEP_STR
}
682 
683 void
684 identify_cpu(void)
685 {
686 	u_int midr;
687 	u_int impl_id;
688 	u_int part_id;
689 	u_int cpu;
690 	size_t i;
691 	const struct cpu_parts *cpu_partsp = NULL;
692 
693 	cpu = PCPU_GET(cpuid);
694 	midr = get_midr();
695 
696 	/*
697 	 * Store midr to pcpu to allow fast reading
698 	 * from EL0, EL1 and assembly code.
699 	 */
700 	PCPU_SET(midr, midr);
701 
702 	impl_id = CPU_IMPL(midr);
703 	for (i = 0; i < nitems(cpu_implementers); i++) {
704 		if (impl_id == cpu_implementers[i].impl_id ||
705 		    cpu_implementers[i].impl_id == 0) {
706 			cpu_desc[cpu].cpu_impl = impl_id;
707 			cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
708 			cpu_partsp = cpu_implementers[i].cpu_parts;
709 			break;
710 		}
711 	}
712 
713 	part_id = CPU_PART(midr);
714 	for (i = 0; &cpu_partsp[i] != NULL; i++) {
715 		if (part_id == cpu_partsp[i].part_id ||
716 		    cpu_partsp[i].part_id == 0) {
717 			cpu_desc[cpu].cpu_part_num = part_id;
718 			cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
719 			break;
720 		}
721 	}
722 
723 	cpu_desc[cpu].cpu_revision = CPU_REV(midr);
724 	cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
725 
726 	/* Save affinity for current CPU */
727 	cpu_desc[cpu].mpidr = get_mpidr();
728 	CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;
729 
730 	cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
731 	cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1);
732 	cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
733 	cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1);
734 	cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
735 	cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
736 	cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
737 	cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1);
738 
739 	if (cpu != 0) {
740 		/*
741 		 * This code must run on one cpu at a time, but we are
742 		 * not scheduling on the current core so implement a
743 		 * simple spinlock.
744 		 */
745 		while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
746 			__asm __volatile("wfe" ::: "memory");
747 
748 		switch (cpu_aff_levels) {
749 		case 0:
750 			if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
751 			    CPU_AFF0(cpu_desc[0].mpidr))
752 				cpu_aff_levels = 1;
753 			/* FALLTHROUGH */
754 		case 1:
755 			if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
756 			    CPU_AFF1(cpu_desc[0].mpidr))
757 				cpu_aff_levels = 2;
758 			/* FALLTHROUGH */
759 		case 2:
760 			if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
761 			    CPU_AFF2(cpu_desc[0].mpidr))
762 				cpu_aff_levels = 3;
763 			/* FALLTHROUGH */
764 		case 3:
765 			if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
766 			    CPU_AFF3(cpu_desc[0].mpidr))
767 				cpu_aff_levels = 4;
768 			break;
769 		}
770 
771 		if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
772 			cpu_print_regs |= PRINT_ID_AA64_AFR0;
773 		if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
774 			cpu_print_regs |= PRINT_ID_AA64_AFR1;
775 
776 		if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
777 			cpu_print_regs |= PRINT_ID_AA64_DFR0;
778 		if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
779 			cpu_print_regs |= PRINT_ID_AA64_DFR1;
780 
781 		if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
782 			cpu_print_regs |= PRINT_ID_AA64_ISAR0;
783 		if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
784 			cpu_print_regs |= PRINT_ID_AA64_ISAR1;
785 
786 		if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
787 			cpu_print_regs |= PRINT_ID_AA64_MMFR0;
788 		if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
789 			cpu_print_regs |= PRINT_ID_AA64_MMFR1;
790 
791 		if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
792 			cpu_print_regs |= PRINT_ID_AA64_PFR0;
793 		if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
794 			cpu_print_regs |= PRINT_ID_AA64_PFR1;
795 
796 		/* Wake up the other CPUs */
797 		atomic_store_rel_int(&ident_lock, 0);
798 		__asm __volatile("sev" ::: "memory");
799 	}
800 }
801