/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Semihalf
 * under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>

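/* Simple spinlock used to serialize identify_cpu() on the secondary CPUs. */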
static int ident_lock;

char machine[] = "arm64";

SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,
    "Machine class");

/*
 * Per-CPU affinity as provided in MPIDR_EL1.
 * Indexed by CPU number in the logical order selected by the system.
 * Relevant fields can be extracted using the CPU_AFFn macros;
 * Aff3.Aff2.Aff1.Aff0 constructs a unique CPU address in the system.
 *
 * Fields used by us:
 * Aff1 - Cluster number
 * Aff0 - CPU number in Aff1 cluster
 */
uint64_t __cpu_affinity[MAXCPU];
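/* Number of affinity levels in which the CPUs differ; used when printing. */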
static u_int cpu_aff_levels;

struct cpu_desc {
	u_int		cpu_impl;
	u_int		cpu_part_num;
	u_int		cpu_variant;
	u_int		cpu_revision;
	const char	*cpu_impl_name;
	const char	*cpu_part_name;

	uint64_t	mpidr;
	uint64_t	id_aa64afr0;
	uint64_t	id_aa64afr1;
	uint64_t	id_aa64dfr0;
	uint64_t	id_aa64dfr1;
	uint64_t	id_aa64isar0;
	uint64_t	id_aa64isar1;
	uint64_t	id_aa64mmfr0;
	uint64_t	id_aa64mmfr1;
	uint64_t	id_aa64mmfr2;
	uint64_t	id_aa64pfr0;
	uint64_t	id_aa64pfr1;
};

struct cpu_desc cpu_desc[MAXCPU];
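/*
 * Bitmask (PRINT_ID_AA64_*) of ID registers that differ from the boot CPU
 * and therefore have to be printed for every CPU.
 */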
static u_int cpu_print_regs;
#define	PRINT_ID_AA64_AFR0	0x00000001
#define	PRINT_ID_AA64_AFR1	0x00000002
#define	PRINT_ID_AA64_DFR0	0x00000010
#define	PRINT_ID_AA64_DFR1	0x00000020
#define	PRINT_ID_AA64_ISAR0	0x00000100
#define	PRINT_ID_AA64_ISAR1	0x00000200
#define	PRINT_ID_AA64_MMFR0	0x00001000
#define	PRINT_ID_AA64_MMFR1	0x00002000
#define	PRINT_ID_AA64_MMFR2	0x00004000
#define	PRINT_ID_AA64_PFR0	0x00010000
#define	PRINT_ID_AA64_PFR1	0x00020000

struct cpu_parts {
	u_int		part_id;
	const char	*part_name;
};
#define	CPU_PART_NONE	{ 0, "Unknown Processor" }

struct cpu_implementers {
	u_int			impl_id;
	const char		*impl_name;
	/*
	 * The part number is implementation defined, so each vendor has
	 * its own set of values and names.
	 */
	const struct cpu_parts	*cpu_parts;
};
#define	CPU_IMPLEMENTER_NONE	{ 0, "Unknown Implementer", cpu_parts_none }

/*
 * Per-implementer table of (PartNum, CPU Name) pairs.
 */
/* ARM Ltd. */
static const struct cpu_parts cpu_parts_arm[] = {
	{ CPU_PART_FOUNDATION, "Foundation-Model" },
	{ CPU_PART_CORTEX_A35, "Cortex-A35" },
	{ CPU_PART_CORTEX_A53, "Cortex-A53" },
	{ CPU_PART_CORTEX_A55, "Cortex-A55" },
	{ CPU_PART_CORTEX_A57, "Cortex-A57" },
	{ CPU_PART_CORTEX_A72, "Cortex-A72" },
	{ CPU_PART_CORTEX_A73, "Cortex-A73" },
	{ CPU_PART_CORTEX_A75, "Cortex-A75" },
	CPU_PART_NONE,
};
/* Cavium */
static const struct cpu_parts cpu_parts_cavium[] = {
	{ CPU_PART_THUNDERX, "ThunderX" },
	{ CPU_PART_THUNDERX2, "ThunderX2" },
	CPU_PART_NONE,
};

/* Unknown */
static const struct cpu_parts cpu_parts_none[] = {
	CPU_PART_NONE,
};

/*
 * Implementers table.
 */
const struct cpu_implementers cpu_implementers[] = {
	{ CPU_IMPL_ARM,		"ARM",		cpu_parts_arm },
	{ CPU_IMPL_BROADCOM,	"Broadcom",	cpu_parts_none },
	{ CPU_IMPL_CAVIUM,	"Cavium",	cpu_parts_cavium },
	{ CPU_IMPL_DEC,		"DEC",		cpu_parts_none },
	{ CPU_IMPL_INFINEON,	"IFX",		cpu_parts_none },
	{ CPU_IMPL_FREESCALE,	"Freescale",	cpu_parts_none },
	{ CPU_IMPL_NVIDIA,	"NVIDIA",	cpu_parts_none },
	{ CPU_IMPL_APM,		"APM",		cpu_parts_none },
	{ CPU_IMPL_QUALCOMM,	"Qualcomm",	cpu_parts_none },
	{ CPU_IMPL_MARVELL,	"Marvell",	cpu_parts_none },
	{ CPU_IMPL_INTEL,	"Intel",	cpu_parts_none },
	CPU_IMPLEMENTER_NONE,
};

static void
identify_cpu_sysinit(void *dummy __unused)
{
	int cpu;

	CPU_FOREACH(cpu) {
		print_cpu_features(cpu);
	}
}
SYSINIT(identify_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);

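/*
 * Print the implementer, part name, revision and MPIDR affinity for the
 * given CPU, followed by the decoded AArch64 ID registers.  For CPUs other
 * than the boot CPU only the registers that differ from CPU 0 are printed.
 */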
void
print_cpu_features(u_int cpu)
{
	int printed;

	printf("CPU%3d: %s %s r%dp%d", cpu, cpu_desc[cpu].cpu_impl_name,
	    cpu_desc[cpu].cpu_part_name, cpu_desc[cpu].cpu_variant,
	    cpu_desc[cpu].cpu_revision);

	printf(" affinity:");
	switch (cpu_aff_levels) {
	default:
	case 4:
		printf(" %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 3:
		printf(" %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 2:
		printf(" %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 1:
	case 0: /* On UP this will be zero */
		printf(" %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
		break;
	}
	printf("\n");

	/*
	 * There is a hardware erratum where, if one CPU is performing a TLB
	 * invalidation while another is performing a store-exclusive, the
	 * store-exclusive may return the wrong status.  A workaround seems
	 * to be to use an IPI to invalidate on each CPU; however, given the
	 * limited number of affected units (pass 1.1 is the evaluation
	 * hardware revision) and the lack of information from Cavium,
	 * this has not been implemented.
	 *
	 * At the time of writing, the only information is from:
	 * https://lkml.org/lkml/2016/8/4/722
	 */
	/*
	 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
	 * triggers on pass 2.0+.
	 */
	if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
	    CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
		printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
		    "hardware bugs that may cause atomic operations to "
		    "behave incorrectly.\n");

	if (cpu != 0 && cpu_print_regs == 0)
		return;

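/* Separator: empty before the first item printed, a comma for the rest. */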
#define SEP_STR	((printed++) == 0) ? "" : ","

	/* AArch64 Instruction Set Attribute Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
		printed = 0;
		printf(" Instruction Set Attributes 0 = <");

		switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_RDM_NONE:
			break;
		case ID_AA64ISAR0_RDM_IMPL:
			printf("%sRDM", SEP_STR);
			break;
		default:
			printf("%sUnknown RDM", SEP_STR);
		}

		switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_ATOMIC_NONE:
			break;
		case ID_AA64ISAR0_ATOMIC_IMPL:
			printf("%sAtomic", SEP_STR);
			break;
		default:
			printf("%sUnknown Atomic", SEP_STR);
		}

		switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_AES_NONE:
			break;
		case ID_AA64ISAR0_AES_BASE:
			printf("%sAES", SEP_STR);
			break;
		case ID_AA64ISAR0_AES_PMULL:
			printf("%sAES+PMULL", SEP_STR);
			break;
		default:
			printf("%sUnknown AES", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA1_NONE:
			break;
		case ID_AA64ISAR0_SHA1_BASE:
			printf("%sSHA1", SEP_STR);
			break;
		default:
			printf("%sUnknown SHA1", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA2_NONE:
			break;
		case ID_AA64ISAR0_SHA2_BASE:
			printf("%sSHA2", SEP_STR);
			break;
		case ID_AA64ISAR0_SHA2_512:
			printf("%sSHA2+SHA512", SEP_STR);
			break;
		default:
			printf("%sUnknown SHA2", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_CRC32_NONE:
			break;
		case ID_AA64ISAR0_CRC32_BASE:
			printf("%sCRC32", SEP_STR);
			break;
		default:
			printf("%sUnknown CRC32", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA3_NONE:
			break;
		case ID_AA64ISAR0_SHA3_IMPL:
			printf("%sSHA3", SEP_STR);
			break;
		default:
			printf("%sUnknown SHA3", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM3_NONE:
			break;
		case ID_AA64ISAR0_SM3_IMPL:
			printf("%sSM3", SEP_STR);
			break;
		default:
			printf("%sUnknown SM3", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM4_NONE:
			break;
		case ID_AA64ISAR0_SM4_IMPL:
			printf("%sSM4", SEP_STR);
			break;
		default:
			printf("%sUnknown SM4", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_DP_NONE:
			break;
		case ID_AA64ISAR0_DP_IMPL:
			printf("%sDotProd", SEP_STR);
			break;
		default:
			printf("%sUnknown DP", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);

		printf(">\n");
	}

	/* AArch64 Instruction Set Attribute Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
		printed = 0;
		printf(" Instruction Set Attributes 1 = <");

		switch (ID_AA64ISAR1_GPI(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPI_NONE:
			break;
		case ID_AA64ISAR1_GPI_IMPL:
			printf("%sImpl GenericAuth", SEP_STR);
			break;
		default:
			printf("%sUnknown GenericAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_GPA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPA_NONE:
			break;
		case ID_AA64ISAR1_GPA_IMPL:
			printf("%sPrince GenericAuth", SEP_STR);
			break;
		default:
			printf("%sUnknown GenericAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_LRCPC_NONE:
			break;
		case ID_AA64ISAR1_LRCPC_IMPL:
			printf("%sRCpc", SEP_STR);
			break;
		default:
			printf("%sUnknown RCpc", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_FCMA_NONE:
			break;
		case ID_AA64ISAR1_FCMA_IMPL:
			printf("%sFCMA", SEP_STR);
			break;
		default:
			printf("%sUnknown FCMA", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_JSCVT_NONE:
			break;
		case ID_AA64ISAR1_JSCVT_IMPL:
			printf("%sJS Conv", SEP_STR);
			break;
		default:
			printf("%sUnknown JS Conv", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_API(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_API_NONE:
			break;
		case ID_AA64ISAR1_API_IMPL:
			printf("%sImpl AddrAuth", SEP_STR);
			break;
		default:
			printf("%sUnknown Impl AddrAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_APA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_APA_NONE:
			break;
		case ID_AA64ISAR1_APA_IMPL:
			printf("%sPrince AddrAuth", SEP_STR);
			break;
		default:
			printf("%sUnknown Prince AddrAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_DPB_NONE:
			break;
		case ID_AA64ISAR1_DPB_IMPL:
			printf("%sDC CVAP", SEP_STR);
			break;
		default:
			printf("%sUnknown DC CVAP", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK);
		printf(">\n");
	}

	/* AArch64 Processor Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
		printed = 0;
		printf("         Processor Features 0 = <");

		switch (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_SVE_NONE:
			break;
		case ID_AA64PFR0_SVE_IMPL:
			printf("%sSVE", SEP_STR);
			break;
		default:
			printf("%sUnknown SVE", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_RAS(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_RAS_NONE:
			break;
		case ID_AA64PFR0_RAS_V1:
			printf("%sRASv1", SEP_STR);
			break;
		default:
			printf("%sUnknown RAS", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_GIC_CPUIF_NONE:
			break;
		case ID_AA64PFR0_GIC_CPUIF_EN:
			printf("%sGIC", SEP_STR);
			break;
		default:
			printf("%sUnknown GIC interface", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_ADV_SIMD_NONE:
			break;
		case ID_AA64PFR0_ADV_SIMD_IMPL:
			printf("%sAdvSIMD", SEP_STR);
			break;
		case ID_AA64PFR0_ADV_SIMD_HP:
			printf("%sAdvSIMD+HP", SEP_STR);
			break;
		default:
			printf("%sUnknown AdvSIMD", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_FP_NONE:
			break;
		case ID_AA64PFR0_FP_IMPL:
			printf("%sFloat", SEP_STR);
			break;
		case ID_AA64PFR0_FP_HP:
			printf("%sFloat+HP", SEP_STR);
			break;
		default:
			printf("%sUnknown Float", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL3_NONE:
			printf("%sNo EL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64:
			printf("%sEL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64_32:
			printf("%sEL3 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL3", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL2_NONE:
			printf("%sNo EL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64:
			printf("%sEL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64_32:
			printf("%sEL2 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL2", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL1_64:
			printf("%sEL1", SEP_STR);
			break;
		case ID_AA64PFR0_EL1_64_32:
			printf("%sEL1 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL1", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL0_64:
			printf("%sEL0", SEP_STR);
			break;
		case ID_AA64PFR0_EL0_64_32:
			printf("%sEL0 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL0", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);

		printf(">\n");
	}

	/* AArch64 Processor Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
		printf("         Processor Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64pfr1);
	}

	/* AArch64 Memory Model Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
		printed = 0;
		printf("      Memory Model Features 0 = <");
		switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN4_NONE:
			break;
		case ID_AA64MMFR0_TGRAN4_IMPL:
			printf("%s4k Granule", SEP_STR);
			break;
		default:
			printf("%sUnknown 4k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN16_NONE:
			break;
		case ID_AA64MMFR0_TGRAN16_IMPL:
			printf("%s16k Granule", SEP_STR);
			break;
		default:
			printf("%sUnknown 16k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN64_NONE:
			break;
		case ID_AA64MMFR0_TGRAN64_IMPL:
			printf("%s64k Granule", SEP_STR);
			break;
		default:
			printf("%sUnknown 64k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_FIXED:
			break;
		case ID_AA64MMFR0_BIGEND_MIXED:
			printf("%sMixedEndian", SEP_STR);
			break;
		default:
			printf("%sUnknown Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_EL0_FIXED:
			break;
		case ID_AA64MMFR0_BIGEND_EL0_MIXED:
			printf("%sEL0 MixEndian", SEP_STR);
			break;
		default:
			printf("%sUnknown EL0 Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_S_NS_MEM_NONE:
			break;
		case ID_AA64MMFR0_S_NS_MEM_DISTINCT:
			printf("%sS/NS Mem", SEP_STR);
			break;
		default:
			printf("%sUnknown S/NS Mem", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_ASID_BITS_8:
			printf("%s8bit ASID", SEP_STR);
			break;
		case ID_AA64MMFR0_ASID_BITS_16:
			printf("%s16bit ASID", SEP_STR);
			break;
		default:
			printf("%sUnknown ASID", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_PA_RANGE_4G:
			printf("%s4GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_64G:
			printf("%s64GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_1T:
			printf("%s1TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_4T:
			printf("%s4TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_16T:
			printf("%s16TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_256T:
			printf("%s256TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_4P:
			printf("%s4PB PA", SEP_STR);
			break;
		default:
			printf("%sUnknown PA Range", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);
		printf(">\n");
	}

	/* AArch64 Memory Model Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
		printed = 0;
		printf("      Memory Model Features 1 = <");

		switch (ID_AA64MMFR1_XNX(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_XNX_NONE:
			break;
		case ID_AA64MMFR1_XNX_IMPL:
			printf("%sEL2 XN", SEP_STR);
			break;
		default:
			printf("%sUnknown XNX", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_SPEC_SEI(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_SPEC_SEI_NONE:
			break;
		case ID_AA64MMFR1_SPEC_SEI_IMPL:
			printf("%sSpecSEI", SEP_STR);
			break;
		default:
			printf("%sUnknown SpecSEI", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_PAN_NONE:
			break;
		case ID_AA64MMFR1_PAN_IMPL:
			printf("%sPAN", SEP_STR);
			break;
		case ID_AA64MMFR1_PAN_ATS1E1:
			printf("%sPAN+AT", SEP_STR);
			break;
		default:
			printf("%sUnknown PAN", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_LO_NONE:
			break;
		case ID_AA64MMFR1_LO_IMPL:
			printf("%sLO", SEP_STR);
			break;
		default:
			printf("%sUnknown LO", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HPDS_NONE:
			break;
		case ID_AA64MMFR1_HPDS_HPD:
			printf("%sHPDS", SEP_STR);
			break;
		case ID_AA64MMFR1_HPDS_TTPBHA:
			printf("%sTTPBHA", SEP_STR);
			break;
		default:
			printf("%sUnknown HPDS", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VH_NONE:
			break;
		case ID_AA64MMFR1_VH_IMPL:
			printf("%sVHE", SEP_STR);
			break;
		default:
			printf("%sUnknown VHE", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VMIDBITS_8:
			break;
		case ID_AA64MMFR1_VMIDBITS_16:
			printf("%s16 VMID bits", SEP_STR);
			break;
		default:
			printf("%sUnknown VMID bits", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HAFDBS_NONE:
			break;
		case ID_AA64MMFR1_HAFDBS_AF:
			printf("%sAF", SEP_STR);
			break;
		case ID_AA64MMFR1_HAFDBS_AF_DBS:
			printf("%sAF+DBS", SEP_STR);
			break;
		default:
			printf("%sUnknown Hardware update AF/DBS", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);
		printf(">\n");
	}

	/* AArch64 Memory Model Feature Register 2 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) {
		printed = 0;
		printf("      Memory Model Features 2 = <");

		switch (ID_AA64MMFR2_NV(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_NV_NONE:
			break;
		case ID_AA64MMFR2_NV_IMPL:
			printf("%sNestedVirt", SEP_STR);
			break;
		default:
			printf("%sUnknown NestedVirt", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_CCIDX(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CCIDX_32:
			printf("%s32b CCIDX", SEP_STR);
			break;
		case ID_AA64MMFR2_CCIDX_64:
			printf("%s64b CCIDX", SEP_STR);
			break;
		default:
			printf("%sUnknown CCIDX", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_VA_RANGE(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_VA_RANGE_48:
			printf("%s48b VA", SEP_STR);
			break;
		case ID_AA64MMFR2_VA_RANGE_52:
			printf("%s52b VA", SEP_STR);
			break;
		default:
			printf("%sUnknown VA Range", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_IESB(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_IESB_NONE:
			break;
		case ID_AA64MMFR2_IESB_IMPL:
			printf("%sIESB", SEP_STR);
			break;
		default:
			printf("%sUnknown IESB", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_LSM(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_LSM_NONE:
			break;
		case ID_AA64MMFR2_LSM_IMPL:
			printf("%sLSM", SEP_STR);
			break;
		default:
			printf("%sUnknown LSM", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_UAO(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_UAO_NONE:
			break;
		case ID_AA64MMFR2_UAO_IMPL:
			printf("%sUAO", SEP_STR);
			break;
		default:
			printf("%sUnknown UAO", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_CNP(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CNP_NONE:
			break;
		case ID_AA64MMFR2_CNP_IMPL:
			printf("%sCnP", SEP_STR);
			break;
		default:
			printf("%sUnknown CnP", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK);
		printf(">\n");
	}

	/* AArch64 Debug Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
		printed = 0;
		printf("             Debug Features 0 = <");
		switch (ID_AA64DFR0_PMS_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMS_VER_NONE:
			break;
		case ID_AA64DFR0_PMS_VER_V1:
			printf("%sSPE v1", SEP_STR);
			break;
		default:
			printf("%sUnknown SPE", SEP_STR);
			break;
		}

		printf("%s%lu CTX Breakpoints", SEP_STR,
		    ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0));

		printf("%s%lu Watchpoints", SEP_STR,
		    ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0));

		printf("%s%lu Breakpoints", SEP_STR,
		    ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0));

		switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMU_VER_NONE:
			break;
		case ID_AA64DFR0_PMU_VER_3:
			printf("%sPMUv3", SEP_STR);
			break;
		case ID_AA64DFR0_PMU_VER_3_1:
			printf("%sPMUv3+16 bit evtCount", SEP_STR);
			break;
		case ID_AA64DFR0_PMU_VER_IMPL:
			printf("%sImplementation defined PMU", SEP_STR);
			break;
		default:
			printf("%sUnknown PMU", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_TRACE_VER_NONE:
			break;
		case ID_AA64DFR0_TRACE_VER_IMPL:
			printf("%sTrace", SEP_STR);
			break;
		default:
			printf("%sUnknown Trace", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_DEBUG_VER_8:
			printf("%sDebug v8", SEP_STR);
			break;
		case ID_AA64DFR0_DEBUG_VER_8_VHE:
			printf("%sDebug v8+VHE", SEP_STR);
			break;
		case ID_AA64DFR0_DEBUG_VER_8_2:
			printf("%sDebug v8.2", SEP_STR);
			break;
		default:
			printf("%sUnknown Debug", SEP_STR);
			break;
		}

		if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);
		printf(">\n");
	}

	/* AArch64 Debug Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
		printf("             Debug Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64dfr1);
	}

	/* AArch64 Auxiliary Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
		printf("         Auxiliary Features 0 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr0);
	}

	/* AArch64 Auxiliary Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
		printf("         Auxiliary Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr1);
	}

#undef SEP_STR
}

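/*
 * Read the MIDR, MPIDR and AArch64 ID registers on the current CPU and
 * record them in cpu_desc[].  On secondary CPUs the values are compared
 * with those of the boot CPU to decide which registers print_cpu_features()
 * has to print for every CPU.
 */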
void
identify_cpu(void)
{
	u_int midr;
	u_int impl_id;
	u_int part_id;
	u_int cpu;
	size_t i;
	const struct cpu_parts *cpu_partsp = NULL;

	cpu = PCPU_GET(cpuid);
	midr = get_midr();

	/*
	 * Store the MIDR in the pcpu data to allow fast reading
	 * from EL0, EL1 and assembly code.
	 */
	PCPU_SET(midr, midr);

	impl_id = CPU_IMPL(midr);
	for (i = 0; i < nitems(cpu_implementers); i++) {
		if (impl_id == cpu_implementers[i].impl_id ||
		    cpu_implementers[i].impl_id == 0) {
			cpu_desc[cpu].cpu_impl = impl_id;
			cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
			cpu_partsp = cpu_implementers[i].cpu_parts;
			break;
		}
	}

	part_id = CPU_PART(midr);
	/* The part tables end with a CPU_PART_NONE (part_id 0) entry. */
	for (i = 0; ; i++) {
		if (part_id == cpu_partsp[i].part_id ||
		    cpu_partsp[i].part_id == 0) {
			cpu_desc[cpu].cpu_part_num = part_id;
			cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
			break;
		}
	}

	cpu_desc[cpu].cpu_revision = CPU_REV(midr);
	cpu_desc[cpu].cpu_variant = CPU_VAR(midr);

	/* Save affinity for current CPU */
	cpu_desc[cpu].mpidr = get_mpidr();
	CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;

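	/* Cache the AArch64 ID registers for printing and comparison. */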
	cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1);
	cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1);
	cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1);
	cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1);
	cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1);
	cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
	cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1);
	cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1);
	cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1);

	if (cpu != 0) {
		/*
		 * This code must run on one CPU at a time, but the
		 * scheduler is not yet running on this core, so use a
		 * simple spinlock for mutual exclusion.
		 */
		while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
			__asm __volatile("wfe" ::: "memory");

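		/*
		 * Update the number of affinity levels that differ from
		 * the boot CPU; print_cpu_features() uses this to decide
		 * how many affinity fields to print.
		 */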
		switch (cpu_aff_levels) {
		case 0:
			if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
			    CPU_AFF0(cpu_desc[0].mpidr))
				cpu_aff_levels = 1;
			/* FALLTHROUGH */
		case 1:
			if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
			    CPU_AFF1(cpu_desc[0].mpidr))
				cpu_aff_levels = 2;
			/* FALLTHROUGH */
		case 2:
			if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
			    CPU_AFF2(cpu_desc[0].mpidr))
				cpu_aff_levels = 3;
			/* FALLTHROUGH */
		case 3:
			if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
			    CPU_AFF3(cpu_desc[0].mpidr))
				cpu_aff_levels = 4;
			break;
		}

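		/*
		 * If any ID register differs from the boot CPU, make
		 * print_cpu_features() print that register for every CPU.
		 */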
		if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
			cpu_print_regs |= PRINT_ID_AA64_AFR0;
		if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
			cpu_print_regs |= PRINT_ID_AA64_AFR1;

		if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
			cpu_print_regs |= PRINT_ID_AA64_DFR0;
		if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
			cpu_print_regs |= PRINT_ID_AA64_DFR1;

		if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
			cpu_print_regs |= PRINT_ID_AA64_ISAR0;
		if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
			cpu_print_regs |= PRINT_ID_AA64_ISAR1;

		if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
			cpu_print_regs |= PRINT_ID_AA64_MMFR0;
		if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
			cpu_print_regs |= PRINT_ID_AA64_MMFR1;
		if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
			cpu_print_regs |= PRINT_ID_AA64_MMFR2;

		if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
			cpu_print_regs |= PRINT_ID_AA64_PFR0;
		if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
			cpu_print_regs |= PRINT_ID_AA64_PFR1;

		/* Wake up the other CPUs */
		atomic_store_rel_int(&ident_lock, 0);
		__asm __volatile("sev" ::: "memory");
	}
}