/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Semihalf
 * under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/undefined.h>

static int ident_lock;

char machine[] = "arm64";

SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,
    "Machine class");

/*
 * Per-CPU affinity as provided in MPIDR_EL1.
 * Indexed by CPU number in logical order selected by the system.
 * Relevant fields can be extracted using the CPU_AFFn macros;
 * Aff3.Aff2.Aff1.Aff0 constructs a unique CPU address in the system.
 *
 * Fields used by us:
 * Aff1 - Cluster number
 * Aff0 - CPU number in Aff1 cluster
 */
uint64_t __cpu_affinity[MAXCPU];
static u_int cpu_aff_levels;
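
/*
 * For illustration (a made-up value, not from any particular SoC): an
 * MPIDR_EL1 affinity of 0x0102 decodes via the CPU_AFFn macros as
 * Aff3 = 0, Aff2 = 0, Aff1 = 1 and Aff0 = 2, i.e. CPU 2 in cluster 1,
 * giving the unique address 0.0.1.2.
 */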

struct cpu_desc {
	u_int		cpu_impl;
	u_int		cpu_part_num;
	u_int		cpu_variant;
	u_int		cpu_revision;
	const char	*cpu_impl_name;
	const char	*cpu_part_name;

	uint64_t	mpidr;
	uint64_t	id_aa64afr0;
	uint64_t	id_aa64afr1;
	uint64_t	id_aa64dfr0;
	uint64_t	id_aa64dfr1;
	uint64_t	id_aa64isar0;
	uint64_t	id_aa64isar1;
	uint64_t	id_aa64mmfr0;
	uint64_t	id_aa64mmfr1;
	uint64_t	id_aa64mmfr2;
	uint64_t	id_aa64pfr0;
	uint64_t	id_aa64pfr1;
};

struct cpu_desc cpu_desc[MAXCPU];
struct cpu_desc user_cpu_desc;
static u_int cpu_print_regs;
#define	PRINT_ID_AA64_AFR0	0x00000001
#define	PRINT_ID_AA64_AFR1	0x00000002
#define	PRINT_ID_AA64_DFR0	0x00000010
#define	PRINT_ID_AA64_DFR1	0x00000020
#define	PRINT_ID_AA64_ISAR0	0x00000100
#define	PRINT_ID_AA64_ISAR1	0x00000200
#define	PRINT_ID_AA64_MMFR0	0x00001000
#define	PRINT_ID_AA64_MMFR1	0x00002000
#define	PRINT_ID_AA64_MMFR2	0x00004000
#define	PRINT_ID_AA64_PFR0	0x00010000
#define	PRINT_ID_AA64_PFR1	0x00020000

struct cpu_parts {
	u_int		part_id;
	const char	*part_name;
};
#define	CPU_PART_NONE	{ 0, "Unknown Processor" }

struct cpu_implementers {
	u_int			impl_id;
	const char		*impl_name;
	/*
	 * Part number is implementation defined
	 * so each vendor will have its own set of values and names.
	 */
	const struct cpu_parts	*cpu_parts;
};
#define	CPU_IMPLEMENTER_NONE	{ 0, "Unknown Implementer", cpu_parts_none }

/*
 * Per-implementer table of (PartNum, CPU Name) pairs.
 */
/* ARM Ltd. */
static const struct cpu_parts cpu_parts_arm[] = {
	{ CPU_PART_FOUNDATION, "Foundation-Model" },
	{ CPU_PART_CORTEX_A35, "Cortex-A35" },
	{ CPU_PART_CORTEX_A53, "Cortex-A53" },
	{ CPU_PART_CORTEX_A55, "Cortex-A55" },
	{ CPU_PART_CORTEX_A57, "Cortex-A57" },
	{ CPU_PART_CORTEX_A72, "Cortex-A72" },
	{ CPU_PART_CORTEX_A73, "Cortex-A73" },
	{ CPU_PART_CORTEX_A75, "Cortex-A75" },
	CPU_PART_NONE,
};
/* Cavium */
static const struct cpu_parts cpu_parts_cavium[] = {
	{ CPU_PART_THUNDERX, "ThunderX" },
	{ CPU_PART_THUNDERX2, "ThunderX2" },
	CPU_PART_NONE,
};

/* Unknown */
static const struct cpu_parts cpu_parts_none[] = {
	CPU_PART_NONE,
};

/*
 * Implementers table.
 */
const struct cpu_implementers cpu_implementers[] = {
	{ CPU_IMPL_ARM,		"ARM",		cpu_parts_arm },
	{ CPU_IMPL_BROADCOM,	"Broadcom",	cpu_parts_none },
	{ CPU_IMPL_CAVIUM,	"Cavium",	cpu_parts_cavium },
	{ CPU_IMPL_DEC,		"DEC",		cpu_parts_none },
	{ CPU_IMPL_INFINEON,	"IFX",		cpu_parts_none },
	{ CPU_IMPL_FREESCALE,	"Freescale",	cpu_parts_none },
	{ CPU_IMPL_NVIDIA,	"NVIDIA",	cpu_parts_none },
	{ CPU_IMPL_APM,		"APM",		cpu_parts_none },
	{ CPU_IMPL_QUALCOMM,	"Qualcomm",	cpu_parts_none },
	{ CPU_IMPL_MARVELL,	"Marvell",	cpu_parts_none },
	{ CPU_IMPL_INTEL,	"Intel",	cpu_parts_none },
	CPU_IMPLEMENTER_NONE,
};

#define	MRS_TYPE_MASK		0xf
#define	MRS_INVALID		0
#define	MRS_EXACT		1
#define	MRS_EXACT_VAL(x)	(MRS_EXACT | ((x) << 4))
#define	MRS_EXACT_FIELD(x)	((x) >> 4)
#define	MRS_LOWER		2

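/*
 * A short sketch of the type encoding: the low four bits hold the match
 * type; for MRS_EXACT, the field value to expose to userland is packed
 * above them.  For example, MRS_EXACT_VAL(0x6) == (MRS_EXACT | (0x6 << 4))
 * == 0x61, and MRS_EXACT_FIELD(0x61) == 0x6 recovers the value.
 */
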
struct mrs_field {
	bool		sign;
	u_int		type;
	u_int		shift;
};

#define	MRS_FIELD(_sign, _type, _shift)					\
	{								\
		.sign = (_sign),					\
		.type = (_type),					\
		.shift = (_shift),					\
	}

#define	MRS_FIELD_END	{ .type = MRS_INVALID, }

static struct mrs_field id_aa64isar0_fields[] = {
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_DP_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM4_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM3_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA3_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_RDM_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_ATOMIC_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_CRC32_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA2_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA1_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_AES_SHIFT),
	MRS_FIELD_END,
};

static struct mrs_field id_aa64isar1_fields[] = {
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_GPI_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_GPA_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_LRCPC_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_FCMA_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_JSCVT_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_API_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_APA_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_DPB_SHIFT),
	MRS_FIELD_END,
};

static struct mrs_field id_aa64pfr0_fields[] = {
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_SVE_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_RAS_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_GIC_SHIFT),
	MRS_FIELD(true,  MRS_LOWER, ID_AA64PFR0_ADV_SIMD_SHIFT),
	MRS_FIELD(true,  MRS_LOWER, ID_AA64PFR0_FP_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL3_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL2_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL1_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL0_SHIFT),
	MRS_FIELD_END,
};

static struct mrs_field id_aa64dfr0_fields[] = {
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMS_VER_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_CTX_CMPS_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_WRPS_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_BRPS_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMU_VER_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_TRACE_VER_SHIFT),
	MRS_FIELD(false, MRS_EXACT_VAL(0x6), ID_AA64DFR0_DEBUG_VER_SHIFT),
	MRS_FIELD_END,
};

struct mrs_user_reg {
	u_int		CRm;
	u_int		Op2;
	size_t		offset;
	struct mrs_field *fields;
};

static struct mrs_user_reg user_regs[] = {
	{	/* id_aa64isar0_el1 */
		.CRm = 6,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64isar0),
		.fields = id_aa64isar0_fields,
	},
	{	/* id_aa64isar1_el1 */
		.CRm = 6,
		.Op2 = 1,
		.offset = __offsetof(struct cpu_desc, id_aa64isar1),
		.fields = id_aa64isar1_fields,
	},
	{	/* id_aa64pfr0_el1 */
		.CRm = 4,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64pfr0),
		.fields = id_aa64pfr0_fields,
	},
	{	/* id_aa64dfr0_el1 */
		.CRm = 5,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64dfr0),
		.fields = id_aa64dfr0_fields,
	},
};

#define	CPU_DESC_FIELD(desc, idx)					\
    *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset)
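
/*
 * For illustration: if user_regs[i] is the id_aa64isar0_el1 entry above,
 * CPU_DESC_FIELD(cpu_desc[0], i) names cpu_desc[0].id_aa64isar0 as an
 * lvalue through its byte offset, letting the same code address any of
 * the 64-bit ID registers generically.
 */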

static int
user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
    uint32_t esr)
{
	uint64_t value;
	int CRm, Op2, i, reg;

	if ((insn & MRS_MASK) != MRS_VALUE)
		return (0);

	/*
	 * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
	 * These are in the EL1 CPU identification space.
	 * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVIDR_EL1.
	 * CRm == {4-7} holds the ID_AA64 registers.
	 *
	 * For full details see the ARMv8 ARM (ARM DDI 0487C.a),
	 * Table D9-2 System instruction encodings for non-Debug System
	 * register accesses.
	 */
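	/*
	 * For example, an EL0 "mrs x1, id_aa64pfr0_el1" traps here and
	 * decodes as Op0 = 3, Op1 = 0, CRn = 0, CRm = 4, Op2 = 0, which
	 * the user_regs lookup below maps to the id_aa64pfr0 value.
	 */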
	if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
		return (0);

	CRm = mrs_CRm(insn);
	if (CRm > 7 || (CRm < 4 && CRm != 0))
		return (0);

	Op2 = mrs_Op2(insn);
	value = 0;

	for (i = 0; i < nitems(user_regs); i++) {
		if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
			value = CPU_DESC_FIELD(user_cpu_desc, i);
			break;
		}
	}

	if (CRm == 0) {
		switch (Op2) {
		case 0:
			value = READ_SPECIALREG(midr_el1);
			break;
		case 5:
			value = READ_SPECIALREG(mpidr_el1);
			break;
		case 6:
			value = READ_SPECIALREG(revidr_el1);
			break;
		default:
			return (0);
		}
	}

	/*
	 * We will handle this instruction; move to the next one so we
	 * don't trap here again.
	 */
	frame->tf_elr += INSN_SIZE;

	reg = MRS_REGISTER(insn);
	/* If reg is 31 then write to xzr, i.e. do nothing. */
	if (reg == 31)
		return (1);

	if (reg < nitems(frame->tf_x))
		frame->tf_x[reg] = value;
	else if (reg == 30)
		frame->tf_lr = value;

	return (1);
}

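/*
 * Merge this CPU's ID register values into the user-visible description.
 * MRS_EXACT fields are forced to the value packed into the field table;
 * MRS_LOWER fields keep the lowest value seen so far, compared signed or
 * unsigned as the field demands.  As an illustrative example, if CPU 0
 * reports the SHA2 field as 2 (SHA2+SHA512) and CPU 1 reports 1, userland
 * is shown 1: only features common to every CPU are advertised.
 */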
static void
update_user_regs(u_int cpu)
{
	struct mrs_field *fields;
	uint64_t cur, value;
	int i, j, cur_field, new_field;

	for (i = 0; i < nitems(user_regs); i++) {
		value = CPU_DESC_FIELD(cpu_desc[cpu], i);
		if (cpu == 0)
			cur = value;
		else
			cur = CPU_DESC_FIELD(user_cpu_desc, i);

		fields = user_regs[i].fields;
		for (j = 0; fields[j].type != 0; j++) {
			/*
			 * Field shifts can exceed 31, so masks and the
			 * shifted values must be 64-bit.
			 */
			switch (fields[j].type & MRS_TYPE_MASK) {
			case MRS_EXACT:
				cur &= ~(0xful << fields[j].shift);
				cur |=
				    (uint64_t)MRS_EXACT_FIELD(fields[j].type) <<
				    fields[j].shift;
				break;
			case MRS_LOWER:
				new_field = (value >> fields[j].shift) & 0xf;
				cur_field = (cur >> fields[j].shift) & 0xf;
				if ((fields[j].sign &&
				     (int)new_field < (int)cur_field) ||
				    (!fields[j].sign &&
				     (u_int)new_field < (u_int)cur_field)) {
					cur &= ~(0xful << fields[j].shift);
					cur |= (uint64_t)new_field <<
					    fields[j].shift;
				}
				break;
			default:
				panic("Invalid field type: %d", fields[j].type);
			}
		}

		CPU_DESC_FIELD(user_cpu_desc, i) = cur;
	}
}

static void
identify_cpu_sysinit(void *dummy __unused)
{
	int cpu;

	/* Create a user-visible CPU description with safe values */
	memset(&user_cpu_desc, 0, sizeof(user_cpu_desc));
	/* Safe values for these registers */
	user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_ADV_SIMD_NONE |
	    ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64;
	user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DEBUG_VER_8;

	CPU_FOREACH(cpu) {
		print_cpu_features(cpu);
		update_user_regs(cpu);
	}

	install_undef_handler(true, user_mrs_handler);
}
SYSINIT(identify_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);

void
print_cpu_features(u_int cpu)
{
	struct sbuf *sb;
	int printed;

	sb = sbuf_new_auto();
	sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
	    cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
	    cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);

	sbuf_cat(sb, " affinity:");
	switch (cpu_aff_levels) {
	default:
	case 4:
		sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 3:
		sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 2:
		sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 1:
	case 0: /* On UP this will be zero */
		sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
		break;
	}
	sbuf_finish(sb);
	printf("%s\n", sbuf_data(sb));
	sbuf_clear(sb);

	/*
	 * There is a hardware erratum where, if one CPU is performing a TLB
	 * invalidation while another is performing a store-exclusive, the
	 * store-exclusive may return the wrong status. A workaround seems
	 * to be to use an IPI to invalidate on each CPU, however given the
	 * limited number of affected units (pass 1.1 is the evaluation
	 * hardware revision) and the lack of information from Cavium,
	 * this has not been implemented.
	 *
	 * At the time of writing, the only information is from:
	 * https://lkml.org/lkml/2016/8/4/722
	 */
	/*
	 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
	 * triggers on pass 2.0+.
	 */
	if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
	    CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
		printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
		    "hardware bugs that may cause atomic operations to "
		    "behave incorrectly.\n");

	if (cpu != 0 && cpu_print_regs == 0)
		return;

#define SEP_STR	((printed++) == 0) ? "" : ","
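/*
 * SEP_STR yields "" for the first item printed and "," afterwards, so a
 * populated register prints as, e.g., <AES+PMULL,SHA1,SHA2> (illustrative).
 */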

	/* AArch64 Instruction Set Attribute Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
		printed = 0;
		sbuf_printf(sb, " Instruction Set Attributes 0 = <");

		switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_RDM_NONE:
			break;
		case ID_AA64ISAR0_RDM_IMPL:
			sbuf_printf(sb, "%sRDM", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown RDM", SEP_STR);
		}

		switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_ATOMIC_NONE:
			break;
		case ID_AA64ISAR0_ATOMIC_IMPL:
			sbuf_printf(sb, "%sAtomic", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Atomic", SEP_STR);
		}

		switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_AES_NONE:
			break;
		case ID_AA64ISAR0_AES_BASE:
			sbuf_printf(sb, "%sAES", SEP_STR);
			break;
		case ID_AA64ISAR0_AES_PMULL:
			sbuf_printf(sb, "%sAES+PMULL", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown AES", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA1_NONE:
			break;
		case ID_AA64ISAR0_SHA1_BASE:
			sbuf_printf(sb, "%sSHA1", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SHA1", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA2_NONE:
			break;
		case ID_AA64ISAR0_SHA2_BASE:
			sbuf_printf(sb, "%sSHA2", SEP_STR);
			break;
		case ID_AA64ISAR0_SHA2_512:
			sbuf_printf(sb, "%sSHA2+SHA512", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SHA2", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_CRC32_NONE:
			break;
		case ID_AA64ISAR0_CRC32_BASE:
			sbuf_printf(sb, "%sCRC32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown CRC32", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA3_NONE:
			break;
		case ID_AA64ISAR0_SHA3_IMPL:
			sbuf_printf(sb, "%sSHA3", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SHA3", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM3_NONE:
			break;
		case ID_AA64ISAR0_SM3_IMPL:
			sbuf_printf(sb, "%sSM3", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SM3", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM4_NONE:
			break;
		case ID_AA64ISAR0_SM4_IMPL:
			sbuf_printf(sb, "%sSM4", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SM4", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_DP_NONE:
			break;
		case ID_AA64ISAR0_DP_IMPL:
			sbuf_printf(sb, "%sDotProd", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown DP", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);

		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Instruction Set Attribute Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
		printed = 0;
		sbuf_printf(sb, " Instruction Set Attributes 1 = <");

		switch (ID_AA64ISAR1_GPI(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPI_NONE:
			break;
		case ID_AA64ISAR1_GPI_IMPL:
			sbuf_printf(sb, "%sImpl GenericAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_GPA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPA_NONE:
			break;
		case ID_AA64ISAR1_GPA_IMPL:
			sbuf_printf(sb, "%sPrince GenericAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_LRCPC_NONE:
			break;
		case ID_AA64ISAR1_LRCPC_IMPL:
			sbuf_printf(sb, "%sRCpc", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown RCpc", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_FCMA_NONE:
			break;
		case ID_AA64ISAR1_FCMA_IMPL:
			sbuf_printf(sb, "%sFCMA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown FCMA", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_JSCVT_NONE:
			break;
		case ID_AA64ISAR1_JSCVT_IMPL:
			sbuf_printf(sb, "%sJS Conv", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown JS Conv", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_API(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_API_NONE:
			break;
		case ID_AA64ISAR1_API_IMPL:
			sbuf_printf(sb, "%sImpl AddrAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Impl AddrAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_APA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_APA_NONE:
			break;
		case ID_AA64ISAR1_APA_IMPL:
			sbuf_printf(sb, "%sPrince AddrAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Prince AddrAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_DPB_NONE:
			break;
		case ID_AA64ISAR1_DPB_IMPL:
			sbuf_printf(sb, "%sDC CVAP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown DC CVAP", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Processor Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
		printed = 0;
		sbuf_printf(sb, "         Processor Features 0 = <");

		switch (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_SVE_NONE:
			break;
		case ID_AA64PFR0_SVE_IMPL:
			sbuf_printf(sb, "%sSVE", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SVE", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_RAS(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_RAS_NONE:
			break;
		case ID_AA64PFR0_RAS_V1:
			sbuf_printf(sb, "%sRASv1", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown RAS", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_GIC_CPUIF_NONE:
			break;
		case ID_AA64PFR0_GIC_CPUIF_EN:
			sbuf_printf(sb, "%sGIC", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown GIC interface", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_ADV_SIMD_NONE:
			break;
		case ID_AA64PFR0_ADV_SIMD_IMPL:
			sbuf_printf(sb, "%sAdvSIMD", SEP_STR);
			break;
		case ID_AA64PFR0_ADV_SIMD_HP:
			sbuf_printf(sb, "%sAdvSIMD+HP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown AdvSIMD", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_FP_NONE:
			break;
		case ID_AA64PFR0_FP_IMPL:
			sbuf_printf(sb, "%sFloat", SEP_STR);
			break;
		case ID_AA64PFR0_FP_HP:
			sbuf_printf(sb, "%sFloat+HP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Float", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL3_NONE:
			sbuf_printf(sb, "%sNo EL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64:
			sbuf_printf(sb, "%sEL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64_32:
			sbuf_printf(sb, "%sEL3 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL3", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL2_NONE:
			sbuf_printf(sb, "%sNo EL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64:
			sbuf_printf(sb, "%sEL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64_32:
			sbuf_printf(sb, "%sEL2 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL2", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL1_64:
			sbuf_printf(sb, "%sEL1", SEP_STR);
			break;
		case ID_AA64PFR0_EL1_64_32:
			sbuf_printf(sb, "%sEL1 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL1", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL0_64:
			sbuf_printf(sb, "%sEL0", SEP_STR);
			break;
		case ID_AA64PFR0_EL0_64_32:
			sbuf_printf(sb, "%sEL0 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL0", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);

		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Processor Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
		printf("         Processor Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64pfr1);
	}

	/* AArch64 Memory Model Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
		printed = 0;
		sbuf_printf(sb, "      Memory Model Features 0 = <");
		switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN4_NONE:
			break;
		case ID_AA64MMFR0_TGRAN4_IMPL:
			sbuf_printf(sb, "%s4k Granule", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown 4k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN16_NONE:
			break;
		case ID_AA64MMFR0_TGRAN16_IMPL:
			sbuf_printf(sb, "%s16k Granule", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown 16k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN64_NONE:
			break;
		case ID_AA64MMFR0_TGRAN64_IMPL:
			sbuf_printf(sb, "%s64k Granule", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown 64k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_FIXED:
			break;
		case ID_AA64MMFR0_BIGEND_MIXED:
			sbuf_printf(sb, "%sMixedEndian", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_EL0_FIXED:
			break;
		case ID_AA64MMFR0_BIGEND_EL0_MIXED:
			sbuf_printf(sb, "%sEL0 MixEndian", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL0 Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_S_NS_MEM_NONE:
			break;
		case ID_AA64MMFR0_S_NS_MEM_DISTINCT:
			sbuf_printf(sb, "%sS/NS Mem", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown S/NS Mem", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_ASID_BITS_8:
			sbuf_printf(sb, "%s8bit ASID", SEP_STR);
			break;
		case ID_AA64MMFR0_ASID_BITS_16:
			sbuf_printf(sb, "%s16bit ASID", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown ASID", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_PA_RANGE_4G:
			sbuf_printf(sb, "%s4GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_64G:
			sbuf_printf(sb, "%s64GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_1T:
			sbuf_printf(sb, "%s1TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_4T:
			sbuf_printf(sb, "%s4TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_16T:
			sbuf_printf(sb, "%s16TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_256T:
			sbuf_printf(sb, "%s256TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_4P:
			sbuf_printf(sb, "%s4PB PA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown PA Range", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Memory Model Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
		printed = 0;
		sbuf_printf(sb, "      Memory Model Features 1 = <");

		switch (ID_AA64MMFR1_XNX(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_XNX_NONE:
			break;
		case ID_AA64MMFR1_XNX_IMPL:
			sbuf_printf(sb, "%sEL2 XN", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown XNX", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_SPEC_SEI(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_SPEC_SEI_NONE:
			break;
		case ID_AA64MMFR1_SPEC_SEI_IMPL:
			sbuf_printf(sb, "%sSpecSEI", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SpecSEI", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_PAN_NONE:
			break;
		case ID_AA64MMFR1_PAN_IMPL:
			sbuf_printf(sb, "%sPAN", SEP_STR);
			break;
		case ID_AA64MMFR1_PAN_ATS1E1:
			sbuf_printf(sb, "%sPAN+AT", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown PAN", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_LO_NONE:
			break;
		case ID_AA64MMFR1_LO_IMPL:
			sbuf_printf(sb, "%sLO", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown LO", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HPDS_NONE:
			break;
		case ID_AA64MMFR1_HPDS_HPD:
			sbuf_printf(sb, "%sHPDS", SEP_STR);
			break;
		case ID_AA64MMFR1_HPDS_TTPBHA:
			sbuf_printf(sb, "%sTTPBHA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown HPDS", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VH_NONE:
			break;
		case ID_AA64MMFR1_VH_IMPL:
			sbuf_printf(sb, "%sVHE", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown VHE", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VMIDBITS_8:
			break;
		case ID_AA64MMFR1_VMIDBITS_16:
			sbuf_printf(sb, "%s16 VMID bits", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown VMID bits", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HAFDBS_NONE:
			break;
		case ID_AA64MMFR1_HAFDBS_AF:
			sbuf_printf(sb, "%sAF", SEP_STR);
			break;
		case ID_AA64MMFR1_HAFDBS_AF_DBS:
			sbuf_printf(sb, "%sAF+DBS", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Hardware update AF/DBS", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Memory Model Feature Register 2 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) {
		printed = 0;
		sbuf_printf(sb, "      Memory Model Features 2 = <");

		switch (ID_AA64MMFR2_NV(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_NV_NONE:
			break;
		case ID_AA64MMFR2_NV_IMPL:
			sbuf_printf(sb, "%sNestedVirt", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown NestedVirt", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_CCIDX(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CCIDX_32:
			sbuf_printf(sb, "%s32b CCIDX", SEP_STR);
			break;
		case ID_AA64MMFR2_CCIDX_64:
			sbuf_printf(sb, "%s64b CCIDX", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown CCIDX", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_VA_RANGE(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_VA_RANGE_48:
			sbuf_printf(sb, "%s48b VA", SEP_STR);
			break;
		case ID_AA64MMFR2_VA_RANGE_52:
			sbuf_printf(sb, "%s52b VA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown VA Range", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_IESB(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_IESB_NONE:
			break;
		case ID_AA64MMFR2_IESB_IMPL:
			sbuf_printf(sb, "%sIESB", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown IESB", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_LSM(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_LSM_NONE:
			break;
		case ID_AA64MMFR2_LSM_IMPL:
			sbuf_printf(sb, "%sLSM", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown LSM", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_UAO(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_UAO_NONE:
			break;
		case ID_AA64MMFR2_UAO_IMPL:
			sbuf_printf(sb, "%sUAO", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown UAO", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_CNP(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CNP_NONE:
			break;
		case ID_AA64MMFR2_CNP_IMPL:
			sbuf_printf(sb, "%sCnP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown CnP", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Debug Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
		printed = 0;
		sbuf_printf(sb, "             Debug Features 0 = <");
		switch (ID_AA64DFR0_PMS_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMS_VER_NONE:
			break;
		case ID_AA64DFR0_PMS_VER_V1:
			sbuf_printf(sb, "%sSPE v1", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SPE", SEP_STR);
			break;
		}

		sbuf_printf(sb, "%s%lu CTX Breakpoints", SEP_STR,
		    ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0));

		sbuf_printf(sb, "%s%lu Watchpoints", SEP_STR,
		    ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0));

		sbuf_printf(sb, "%s%lu Breakpoints", SEP_STR,
		    ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0));

		switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMU_VER_NONE:
			break;
		case ID_AA64DFR0_PMU_VER_3:
			sbuf_printf(sb, "%sPMUv3", SEP_STR);
			break;
		case ID_AA64DFR0_PMU_VER_3_1:
			sbuf_printf(sb, "%sPMUv3+16 bit evtCount", SEP_STR);
			break;
		case ID_AA64DFR0_PMU_VER_IMPL:
			sbuf_printf(sb, "%sImplementation defined PMU", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown PMU", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_TRACE_VER_NONE:
			break;
		case ID_AA64DFR0_TRACE_VER_IMPL:
			sbuf_printf(sb, "%sTrace", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Trace", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_DEBUG_VER_8:
			sbuf_printf(sb, "%sDebug v8", SEP_STR);
			break;
		case ID_AA64DFR0_DEBUG_VER_8_VHE:
			sbuf_printf(sb, "%sDebug v8+VHE", SEP_STR);
			break;
		case ID_AA64DFR0_DEBUG_VER_8_2:
			sbuf_printf(sb, "%sDebug v8.2", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Debug", SEP_STR);
			break;
		}

		if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Debug Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
		printf("             Debug Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64dfr1);
	}

	/* AArch64 Auxiliary Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
		printf("         Auxiliary Features 0 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr0);
	}

	/* AArch64 Auxiliary Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
		printf("         Auxiliary Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr1);
	}

	sbuf_delete(sb);
	sb = NULL;
#undef SEP_STR
}

void
identify_cpu(void)
{
	u_int midr;
	u_int impl_id;
	u_int part_id;
	u_int cpu;
	size_t i;
	const struct cpu_parts *cpu_partsp = NULL;

	cpu = PCPU_GET(cpuid);
	midr = get_midr();

	/*
	 * Store midr to pcpu to allow fast reading
	 * from EL0, EL1 and assembly code.
	 */
	PCPU_SET(midr, midr);

	impl_id = CPU_IMPL(midr);
	for (i = 0; i < nitems(cpu_implementers); i++) {
		if (impl_id == cpu_implementers[i].impl_id ||
		    cpu_implementers[i].impl_id == 0) {
			cpu_desc[cpu].cpu_impl = impl_id;
			cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
			cpu_partsp = cpu_implementers[i].cpu_parts;
			break;
		}
	}

	part_id = CPU_PART(midr);
	/*
	 * The part table is terminated by CPU_PART_NONE (part_id == 0),
	 * which the check below always matches, so the loop needs no
	 * other exit condition.
	 */
	for (i = 0; ; i++) {
		if (part_id == cpu_partsp[i].part_id ||
		    cpu_partsp[i].part_id == 0) {
			cpu_desc[cpu].cpu_part_num = part_id;
			cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
			break;
		}
	}

	cpu_desc[cpu].cpu_revision = CPU_REV(midr);
	cpu_desc[cpu].cpu_variant = CPU_VAR(midr);

	/* Save affinity for current CPU */
	cpu_desc[cpu].mpidr = get_mpidr();
	CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;

	cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1);
	cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1);
	cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1);
	cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1);
	cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1);
	cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
	cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1);
	cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1);
	cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1);

	if (cpu != 0) {
		/*
		 * This code must run on one CPU at a time, but the
		 * scheduler is not yet running on this core, so
		 * implement a simple spinlock.
		 */
		while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
			__asm __volatile("wfe" ::: "memory");
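		/*
		 * A sketch of the handshake (the usual wfe/sev pairing):
		 * the CPU that holds ident_lock clears it below with
		 * atomic_store_rel_int() and executes "sev", waking any
		 * CPU parked in "wfe" above so it can retry the
		 * atomic_cmpset_acq_int().
		 */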

		switch (cpu_aff_levels) {
		case 0:
			if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
			    CPU_AFF0(cpu_desc[0].mpidr))
				cpu_aff_levels = 1;
			/* FALLTHROUGH */
		case 1:
			if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
			    CPU_AFF1(cpu_desc[0].mpidr))
				cpu_aff_levels = 2;
			/* FALLTHROUGH */
		case 2:
			if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
			    CPU_AFF2(cpu_desc[0].mpidr))
				cpu_aff_levels = 3;
			/* FALLTHROUGH */
		case 3:
			if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
			    CPU_AFF3(cpu_desc[0].mpidr))
				cpu_aff_levels = 4;
			break;
		}

		if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
			cpu_print_regs |= PRINT_ID_AA64_AFR0;
		if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
			cpu_print_regs |= PRINT_ID_AA64_AFR1;

		if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
			cpu_print_regs |= PRINT_ID_AA64_DFR0;
		if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
			cpu_print_regs |= PRINT_ID_AA64_DFR1;

		if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
			cpu_print_regs |= PRINT_ID_AA64_ISAR0;
		if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
			cpu_print_regs |= PRINT_ID_AA64_ISAR1;

		if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
			cpu_print_regs |= PRINT_ID_AA64_MMFR0;
		if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
			cpu_print_regs |= PRINT_ID_AA64_MMFR1;
		if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
			cpu_print_regs |= PRINT_ID_AA64_MMFR2;

		if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
			cpu_print_regs |= PRINT_ID_AA64_PFR0;
		if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
			cpu_print_regs |= PRINT_ID_AA64_PFR1;

		/* Wake up the other CPUs */
		atomic_store_rel_int(&ident_lock, 0);
		__asm __volatile("sev" ::: "memory");
	}
}