/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Semihalf
 * under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/undefined.h>

static int ident_lock;

char machine[] = "arm64";

#ifdef SCTL_MASK32
extern int adaptive_machine_arch;
#endif
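
/*
 * Report the machine class.  Under 32-bit compat (SCTL_MASK32) a
 * 32-bit process is given "arm" rather than "arm64" when
 * adaptive_machine_arch is enabled.
 */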
static int
sysctl_hw_machine(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
	static const char machine32[] = "arm";
#endif
	int error;
#ifdef SCTL_MASK32
	if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch)
		error = SYSCTL_OUT(req, machine32, sizeof(machine32));
	else
#endif
		error = SYSCTL_OUT(req, machine, sizeof(machine));
	return (error);
}

SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD |
	CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class");

/*
 * Per-CPU affinity as provided in MPIDR_EL1.
 * Indexed by CPU number in the logical order selected by the system.
 * Relevant fields can be extracted using the CPU_AFFn macros;
 * Aff3.Aff2.Aff1.Aff0 constructs a unique CPU address in the system.
 *
 * Fields used by us:
 * Aff1 - Cluster number
 * Aff0 - CPU number in Aff1 cluster
 */
uint64_t __cpu_affinity[MAXCPU];
static u_int cpu_aff_levels;

struct cpu_desc {
	u_int		cpu_impl;
	u_int		cpu_part_num;
	u_int		cpu_variant;
	u_int		cpu_revision;
	const char	*cpu_impl_name;
	const char	*cpu_part_name;

	uint64_t	mpidr;
	uint64_t	id_aa64afr0;
	uint64_t	id_aa64afr1;
	uint64_t	id_aa64dfr0;
	uint64_t	id_aa64dfr1;
	uint64_t	id_aa64isar0;
	uint64_t	id_aa64isar1;
	uint64_t	id_aa64mmfr0;
	uint64_t	id_aa64mmfr1;
	uint64_t	id_aa64mmfr2;
	uint64_t	id_aa64pfr0;
	uint64_t	id_aa64pfr1;
};
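
/* Per-CPU identification state, plus the sanitised copy shown to userland. */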
struct cpu_desc cpu_desc[MAXCPU];
struct cpu_desc user_cpu_desc;
static u_int cpu_print_regs;
#define	PRINT_ID_AA64_AFR0	0x00000001
#define	PRINT_ID_AA64_AFR1	0x00000002
#define	PRINT_ID_AA64_DFR0	0x00000010
#define	PRINT_ID_AA64_DFR1	0x00000020
#define	PRINT_ID_AA64_ISAR0	0x00000100
#define	PRINT_ID_AA64_ISAR1	0x00000200
#define	PRINT_ID_AA64_MMFR0	0x00001000
#define	PRINT_ID_AA64_MMFR1	0x00002000
#define	PRINT_ID_AA64_MMFR2	0x00004000
#define	PRINT_ID_AA64_PFR0	0x00010000
#define	PRINT_ID_AA64_PFR1	0x00020000

struct cpu_parts {
	u_int		part_id;
	const char	*part_name;
};
#define	CPU_PART_NONE	{ 0, "Unknown Processor" }

struct cpu_implementers {
	u_int			impl_id;
	const char		*impl_name;
	/*
	 * Part number is implementation defined
	 * so each vendor will have its own set of values and names.
	 */
	const struct cpu_parts	*cpu_parts;
};
#define	CPU_IMPLEMENTER_NONE	{ 0, "Unknown Implementer", cpu_parts_none }

/*
 * Per-implementer table of (PartNum, CPU Name) pairs.
 */
/* ARM Ltd. */
static const struct cpu_parts cpu_parts_arm[] = {
	{ CPU_PART_FOUNDATION, "Foundation-Model" },
	{ CPU_PART_CORTEX_A35, "Cortex-A35" },
	{ CPU_PART_CORTEX_A53, "Cortex-A53" },
	{ CPU_PART_CORTEX_A55, "Cortex-A55" },
	{ CPU_PART_CORTEX_A57, "Cortex-A57" },
	{ CPU_PART_CORTEX_A72, "Cortex-A72" },
	{ CPU_PART_CORTEX_A73, "Cortex-A73" },
	{ CPU_PART_CORTEX_A75, "Cortex-A75" },
	CPU_PART_NONE,
};
/* Cavium */
static const struct cpu_parts cpu_parts_cavium[] = {
	{ CPU_PART_THUNDERX, "ThunderX" },
	{ CPU_PART_THUNDERX2, "ThunderX2" },
	CPU_PART_NONE,
};

/* Unknown */
static const struct cpu_parts cpu_parts_none[] = {
	CPU_PART_NONE,
};

/*
 * Implementers table.
 */
const struct cpu_implementers cpu_implementers[] = {
	{ CPU_IMPL_ARM,		"ARM",		cpu_parts_arm },
	{ CPU_IMPL_BROADCOM,	"Broadcom",	cpu_parts_none },
	{ CPU_IMPL_CAVIUM,	"Cavium",	cpu_parts_cavium },
	{ CPU_IMPL_DEC,		"DEC",		cpu_parts_none },
	{ CPU_IMPL_INFINEON,	"IFX",		cpu_parts_none },
	{ CPU_IMPL_FREESCALE,	"Freescale",	cpu_parts_none },
	{ CPU_IMPL_NVIDIA,	"NVIDIA",	cpu_parts_none },
	{ CPU_IMPL_APM,		"APM",		cpu_parts_none },
	{ CPU_IMPL_QUALCOMM,	"Qualcomm",	cpu_parts_none },
	{ CPU_IMPL_MARVELL,	"Marvell",	cpu_parts_none },
	{ CPU_IMPL_INTEL,	"Intel",	cpu_parts_none },
	CPU_IMPLEMENTER_NONE,
};

#define	MRS_TYPE_MASK		0xf
#define	MRS_INVALID		0
#define	MRS_EXACT		1
#define	MRS_EXACT_VAL(x)	(MRS_EXACT | ((x) << 4))
#define	MRS_EXACT_FIELD(x)	((x) >> 4)
#define	MRS_LOWER		2
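
/*
 * How update_user_regs() folds each field into the user-visible value:
 * MRS_EXACT fields are pinned to the value packed by MRS_EXACT_VAL()
 * (recovered with MRS_EXACT_FIELD()), while MRS_LOWER fields keep the
 * lowest value seen on any CPU, compared as signed when sign is set.
 */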
struct mrs_field {
	bool		sign;
	u_int		type;
	u_int		shift;
};

#define	MRS_FIELD(_sign, _type, _shift)					\
	{								\
		.sign = (_sign),					\
		.type = (_type),					\
		.shift = (_shift),					\
	}

#define	MRS_FIELD_END	{ .type = MRS_INVALID, }

static struct mrs_field id_aa64isar0_fields[] = {
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_DP_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM4_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM3_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA3_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_RDM_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_ATOMIC_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_CRC32_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA2_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA1_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_AES_SHIFT),
	MRS_FIELD_END,
};

static struct mrs_field id_aa64isar1_fields[] = {
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_GPI_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_GPA_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_LRCPC_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_FCMA_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_JSCVT_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_API_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_APA_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_DPB_SHIFT),
	MRS_FIELD_END,
};

static struct mrs_field id_aa64pfr0_fields[] = {
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_SVE_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_RAS_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_GIC_SHIFT),
	MRS_FIELD(true,  MRS_LOWER, ID_AA64PFR0_ADV_SIMD_SHIFT),
	MRS_FIELD(true,  MRS_LOWER, ID_AA64PFR0_FP_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL3_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL2_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL1_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL0_SHIFT),
	MRS_FIELD_END,
};

static struct mrs_field id_aa64dfr0_fields[] = {
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMS_VER_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_CTX_CMPS_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_WRPS_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_BRPS_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMU_VER_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_TRACE_VER_SHIFT),
	MRS_FIELD(false, MRS_EXACT_VAL(0x6), ID_AA64DFR0_DEBUG_VER_SHIFT),
	MRS_FIELD_END,
};

struct mrs_user_reg {
	u_int		CRm;
	u_int		Op2;
	size_t		offset;
	struct mrs_field *fields;
};
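
/*
 * ID registers emulated for userland.  Each entry gives the register's
 * CRm/Op2 encoding (Op0 == 3, Op1 == 0 and CRn == 0 are checked in
 * user_mrs_handler()) and the offset of the cached value within
 * struct cpu_desc.
 */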
static struct mrs_user_reg user_regs[] = {
	{	/* id_aa64isar0_el1 */
		.CRm = 6,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64isar0),
		.fields = id_aa64isar0_fields,
	},
	{	/* id_aa64isar1_el1 */
		.CRm = 6,
		.Op2 = 1,
		.offset = __offsetof(struct cpu_desc, id_aa64isar1),
		.fields = id_aa64isar1_fields,
	},
	{	/* id_aa64pfr0_el1 */
		.CRm = 4,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64pfr0),
		.fields = id_aa64pfr0_fields,
	},
	{	/* id_aa64dfr0_el1 */
		.CRm = 5,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64dfr0),
		.fields = id_aa64dfr0_fields,
	},
};

#define	CPU_DESC_FIELD(desc, idx)					\
    *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset)

static int
user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
    uint32_t esr)
{
	uint64_t value;
	int CRm, Op2, i, reg;

	if ((insn & MRS_MASK) != MRS_VALUE)
		return (0);

	/*
	 * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
	 * These are in the EL1 CPU identification space.
	 * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVIDR_EL1.
	 * CRm == {4-7} holds the ID_AA64 registers.
	 *
	 * For full details see the ARMv8 ARM (ARM DDI 0487C.a),
	 * Table D9-2 System instruction encodings for non-Debug System
	 * register accesses.
	 */
	if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
		return (0);

	CRm = mrs_CRm(insn);
	if (CRm > 7 || (CRm < 4 && CRm != 0))
		return (0);

	Op2 = mrs_Op2(insn);
	value = 0;

	for (i = 0; i < nitems(user_regs); i++) {
		if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
			value = CPU_DESC_FIELD(user_cpu_desc, i);
			break;
		}
	}

	if (CRm == 0) {
		switch (Op2) {
		case 0:
			value = READ_SPECIALREG(midr_el1);
			break;
		case 5:
			value = READ_SPECIALREG(mpidr_el1);
			break;
		case 6:
			value = READ_SPECIALREG(revidr_el1);
			break;
		default:
			return (0);
		}
	}

	/*
	 * We will handle this instruction; move on to the next one so
	 * we don't trap here again.
	 */
	frame->tf_elr += INSN_SIZE;

	reg = MRS_REGISTER(insn);
	/* If reg is 31 then write to xzr, i.e. do nothing */
	if (reg == 31)
		return (1);

	if (reg < nitems(frame->tf_x))
		frame->tf_x[reg] = value;
	else if (reg == 30)
		frame->tf_lr = value;

	return (1);
}
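
/*
 * Example of the access the handler above emulates: userland may read
 * an ID register directly, e.g.
 *
 *	uint64_t isar0;
 *	__asm __volatile("mrs %0, id_aa64isar0_el1" : "=r" (isar0));
 *
 * The read traps as an undefined instruction and the handler supplies
 * the sanitised value from user_cpu_desc in place of the raw register.
 */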
static void
update_user_regs(u_int cpu)
{
	struct mrs_field *fields;
	uint64_t cur, value;
	int i, j, cur_field, new_field;

	for (i = 0; i < nitems(user_regs); i++) {
		value = CPU_DESC_FIELD(cpu_desc[cpu], i);
		if (cpu == 0)
			cur = value;
		else
			cur = CPU_DESC_FIELD(user_cpu_desc, i);

		fields = user_regs[i].fields;
		for (j = 0; fields[j].type != 0; j++) {
			switch (fields[j].type & MRS_TYPE_MASK) {
			case MRS_EXACT:
				cur &= ~(0xful << fields[j].shift);
				cur |=
				    (uint64_t)MRS_EXACT_FIELD(fields[j].type) <<
				    fields[j].shift;
				break;
			case MRS_LOWER:
				new_field = (value >> fields[j].shift) & 0xf;
				cur_field = (cur >> fields[j].shift) & 0xf;
				if ((fields[j].sign &&
				     (int)new_field < (int)cur_field) ||
				    (!fields[j].sign &&
				     (u_int)new_field < (u_int)cur_field)) {
					cur &= ~(0xful << fields[j].shift);
					cur |= (uint64_t)new_field <<
					    fields[j].shift;
				}
				break;
			default:
				panic("Invalid field type: %d", fields[j].type);
			}
		}

		CPU_DESC_FIELD(user_cpu_desc, i) = cur;
	}
}
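
/*
 * Runs once SMP is up (SI_SUB_SMP): print each CPU's features, fold
 * every CPU's registers into the user-visible set and start emulating
 * userland ID register reads.
 */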
static void
identify_cpu_sysinit(void *dummy __unused)
{
	int cpu;

	/* Create a user-visible cpu description with safe values */
	memset(&user_cpu_desc, 0, sizeof(user_cpu_desc));
	/* Safe values for these registers */
	user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_ADV_SIMD_NONE |
	    ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64;
	user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DEBUG_VER_8;

	CPU_FOREACH(cpu) {
		print_cpu_features(cpu);
		update_user_regs(cpu);
	}

	install_undef_handler(true, user_mrs_handler);
}
SYSINIT(identify_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
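
/*
 * Pretty-print the identification registers for one CPU.  CPU 0 always
 * prints every register; the others only print those that differ from
 * CPU 0, as flagged in cpu_print_regs by identify_cpu().
 */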
void
print_cpu_features(u_int cpu)
{
	struct sbuf *sb;
	int printed;

	sb = sbuf_new_auto();
	sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
	    cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
	    cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);

	sbuf_cat(sb, " affinity:");
	switch (cpu_aff_levels) {
	default:
	case 4:
		sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 3:
		sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 2:
		sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 1:
	case 0: /* On UP this will be zero */
		sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
		break;
	}
	sbuf_finish(sb);
	printf("%s\n", sbuf_data(sb));
	sbuf_clear(sb);

	/*
	 * There is a hardware erratum where, if one CPU is performing a
	 * TLB invalidation while another is performing a store-exclusive,
	 * the store-exclusive may return the wrong status. A workaround
	 * seems to be to use an IPI to invalidate on each CPU, however
	 * given the limited number of affected units (pass 1.1 is the
	 * evaluation hardware revision) and the lack of information from
	 * Cavium, this has not been implemented.
	 *
	 * At the time of writing, the only information is from:
	 * https://lkml.org/lkml/2016/8/4/722
	 */
	/*
	 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
	 * triggers on pass 2.0+.
	 */
	if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
	    CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
		printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
		    "hardware bugs that may cause atomic operations to "
		    "behave incorrectly.\n");

	if (cpu != 0 && cpu_print_regs == 0)
		return;
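
/* Expands to "" before the first flag printed and "," before the rest. */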
#define SEP_STR	((printed++) == 0) ? "" : ","

	/* AArch64 Instruction Set Attribute Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
		printed = 0;
		sbuf_printf(sb, " Instruction Set Attributes 0 = <");

		switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_RDM_NONE:
			break;
		case ID_AA64ISAR0_RDM_IMPL:
			sbuf_printf(sb, "%sRDM", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown RDM", SEP_STR);
		}

		switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_ATOMIC_NONE:
			break;
		case ID_AA64ISAR0_ATOMIC_IMPL:
			sbuf_printf(sb, "%sAtomic", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Atomic", SEP_STR);
		}

		switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_AES_NONE:
			break;
		case ID_AA64ISAR0_AES_BASE:
			sbuf_printf(sb, "%sAES", SEP_STR);
			break;
		case ID_AA64ISAR0_AES_PMULL:
			sbuf_printf(sb, "%sAES+PMULL", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown AES", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA1_NONE:
			break;
		case ID_AA64ISAR0_SHA1_BASE:
			sbuf_printf(sb, "%sSHA1", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SHA1", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA2_NONE:
			break;
		case ID_AA64ISAR0_SHA2_BASE:
			sbuf_printf(sb, "%sSHA2", SEP_STR);
			break;
		case ID_AA64ISAR0_SHA2_512:
			sbuf_printf(sb, "%sSHA2+SHA512", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SHA2", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_CRC32_NONE:
			break;
		case ID_AA64ISAR0_CRC32_BASE:
			sbuf_printf(sb, "%sCRC32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown CRC32", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA3_NONE:
			break;
		case ID_AA64ISAR0_SHA3_IMPL:
			sbuf_printf(sb, "%sSHA3", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SHA3", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM3_NONE:
			break;
		case ID_AA64ISAR0_SM3_IMPL:
			sbuf_printf(sb, "%sSM3", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SM3", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM4_NONE:
			break;
		case ID_AA64ISAR0_SM4_IMPL:
			sbuf_printf(sb, "%sSM4", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SM4", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_DP_NONE:
			break;
		case ID_AA64ISAR0_DP_IMPL:
			sbuf_printf(sb, "%sDotProd", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown DP", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);

		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Instruction Set Attribute Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
		printed = 0;
		sbuf_printf(sb, " Instruction Set Attributes 1 = <");

		switch (ID_AA64ISAR1_GPI(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPI_NONE:
			break;
		case ID_AA64ISAR1_GPI_IMPL:
			sbuf_printf(sb, "%sImpl GenericAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_GPA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPA_NONE:
			break;
		case ID_AA64ISAR1_GPA_IMPL:
			sbuf_printf(sb, "%sPrince GenericAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_LRCPC_NONE:
			break;
		case ID_AA64ISAR1_LRCPC_IMPL:
			sbuf_printf(sb, "%sRCpc", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown RCpc", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_FCMA_NONE:
			break;
		case ID_AA64ISAR1_FCMA_IMPL:
			sbuf_printf(sb, "%sFCMA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown FCMA", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_JSCVT_NONE:
			break;
		case ID_AA64ISAR1_JSCVT_IMPL:
			sbuf_printf(sb, "%sJS Conv", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown JS Conv", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_API(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_API_NONE:
			break;
		case ID_AA64ISAR1_API_IMPL:
			sbuf_printf(sb, "%sImpl AddrAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Impl AddrAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_APA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_APA_NONE:
			break;
		case ID_AA64ISAR1_APA_IMPL:
			sbuf_printf(sb, "%sPrince AddrAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Prince AddrAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_DPB_NONE:
			break;
		case ID_AA64ISAR1_DPB_IMPL:
			sbuf_printf(sb, "%sDC CVAP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown DC CVAP", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Processor Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
		printed = 0;
		sbuf_printf(sb, "         Processor Features 0 = <");

		switch (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_SVE_NONE:
			break;
		case ID_AA64PFR0_SVE_IMPL:
			sbuf_printf(sb, "%sSVE", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SVE", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_RAS(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_RAS_NONE:
			break;
		case ID_AA64PFR0_RAS_V1:
			sbuf_printf(sb, "%sRASv1", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown RAS", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_GIC_CPUIF_NONE:
			break;
		case ID_AA64PFR0_GIC_CPUIF_EN:
			sbuf_printf(sb, "%sGIC", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown GIC interface", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_ADV_SIMD_NONE:
			break;
		case ID_AA64PFR0_ADV_SIMD_IMPL:
			sbuf_printf(sb, "%sAdvSIMD", SEP_STR);
			break;
		case ID_AA64PFR0_ADV_SIMD_HP:
			sbuf_printf(sb, "%sAdvSIMD+HP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown AdvSIMD", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_FP_NONE:
			break;
		case ID_AA64PFR0_FP_IMPL:
			sbuf_printf(sb, "%sFloat", SEP_STR);
			break;
		case ID_AA64PFR0_FP_HP:
			sbuf_printf(sb, "%sFloat+HP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Float", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL3_NONE:
			sbuf_printf(sb, "%sNo EL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64:
			sbuf_printf(sb, "%sEL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64_32:
			sbuf_printf(sb, "%sEL3 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL3", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL2_NONE:
			sbuf_printf(sb, "%sNo EL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64:
			sbuf_printf(sb, "%sEL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64_32:
			sbuf_printf(sb, "%sEL2 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL2", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL1_64:
			sbuf_printf(sb, "%sEL1", SEP_STR);
			break;
		case ID_AA64PFR0_EL1_64_32:
			sbuf_printf(sb, "%sEL1 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL1", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL0_64:
			sbuf_printf(sb, "%sEL0", SEP_STR);
			break;
		case ID_AA64PFR0_EL0_64_32:
			sbuf_printf(sb, "%sEL0 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL0", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);

		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Processor Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
		printf("         Processor Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64pfr1);
	}

	/* AArch64 Memory Model Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
		printed = 0;
		sbuf_printf(sb, "      Memory Model Features 0 = <");
		switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN4_NONE:
			break;
		case ID_AA64MMFR0_TGRAN4_IMPL:
			sbuf_printf(sb, "%s4k Granule", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown 4k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN16_NONE:
			break;
		case ID_AA64MMFR0_TGRAN16_IMPL:
			sbuf_printf(sb, "%s16k Granule", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown 16k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN64_NONE:
			break;
		case ID_AA64MMFR0_TGRAN64_IMPL:
			sbuf_printf(sb, "%s64k Granule", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown 64k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_FIXED:
			break;
		case ID_AA64MMFR0_BIGEND_MIXED:
			sbuf_printf(sb, "%sMixedEndian", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_EL0_FIXED:
			break;
		case ID_AA64MMFR0_BIGEND_EL0_MIXED:
			sbuf_printf(sb, "%sEL0 MixEndian", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL0 Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_S_NS_MEM_NONE:
			break;
		case ID_AA64MMFR0_S_NS_MEM_DISTINCT:
			sbuf_printf(sb, "%sS/NS Mem", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown S/NS Mem", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_ASID_BITS_8:
			sbuf_printf(sb, "%s8bit ASID", SEP_STR);
			break;
		case ID_AA64MMFR0_ASID_BITS_16:
			sbuf_printf(sb, "%s16bit ASID", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown ASID", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_PA_RANGE_4G:
			sbuf_printf(sb, "%s4GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_64G:
			sbuf_printf(sb, "%s64GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_1T:
			sbuf_printf(sb, "%s1TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_4T:
			sbuf_printf(sb, "%s4TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_16T:
			sbuf_printf(sb, "%s16TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_256T:
			sbuf_printf(sb, "%s256TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_4P:
			sbuf_printf(sb, "%s4PB PA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown PA Range", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Memory Model Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
		printed = 0;
		sbuf_printf(sb, "      Memory Model Features 1 = <");

		switch (ID_AA64MMFR1_XNX(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_XNX_NONE:
			break;
		case ID_AA64MMFR1_XNX_IMPL:
			sbuf_printf(sb, "%sEL2 XN", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown XNX", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_SPEC_SEI(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_SPEC_SEI_NONE:
			break;
		case ID_AA64MMFR1_SPEC_SEI_IMPL:
			sbuf_printf(sb, "%sSpecSEI", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SpecSEI", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_PAN_NONE:
			break;
		case ID_AA64MMFR1_PAN_IMPL:
			sbuf_printf(sb, "%sPAN", SEP_STR);
			break;
		case ID_AA64MMFR1_PAN_ATS1E1:
			sbuf_printf(sb, "%sPAN+AT", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown PAN", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_LO_NONE:
			break;
		case ID_AA64MMFR1_LO_IMPL:
			sbuf_printf(sb, "%sLO", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown LO", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HPDS_NONE:
			break;
		case ID_AA64MMFR1_HPDS_HPD:
			sbuf_printf(sb, "%sHPDS", SEP_STR);
			break;
		case ID_AA64MMFR1_HPDS_TTPBHA:
			sbuf_printf(sb, "%sTTPBHA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown HPDS", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VH_NONE:
			break;
		case ID_AA64MMFR1_VH_IMPL:
			sbuf_printf(sb, "%sVHE", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown VHE", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VMIDBITS_8:
			break;
		case ID_AA64MMFR1_VMIDBITS_16:
			sbuf_printf(sb, "%s16 VMID bits", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown VMID bits", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HAFDBS_NONE:
			break;
		case ID_AA64MMFR1_HAFDBS_AF:
			sbuf_printf(sb, "%sAF", SEP_STR);
			break;
		case ID_AA64MMFR1_HAFDBS_AF_DBS:
			sbuf_printf(sb, "%sAF+DBS", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Hardware update AF/DBS", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Memory Model Feature Register 2 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) {
		printed = 0;
		sbuf_printf(sb, "      Memory Model Features 2 = <");

		switch (ID_AA64MMFR2_NV(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_NV_NONE:
			break;
		case ID_AA64MMFR2_NV_IMPL:
			sbuf_printf(sb, "%sNestedVirt", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown NestedVirt", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_CCIDX(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CCIDX_32:
			sbuf_printf(sb, "%s32b CCIDX", SEP_STR);
			break;
		case ID_AA64MMFR2_CCIDX_64:
			sbuf_printf(sb, "%s64b CCIDX", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown CCIDX", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_VA_RANGE(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_VA_RANGE_48:
			sbuf_printf(sb, "%s48b VA", SEP_STR);
			break;
		case ID_AA64MMFR2_VA_RANGE_52:
			sbuf_printf(sb, "%s52b VA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown VA Range", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_IESB(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_IESB_NONE:
			break;
		case ID_AA64MMFR2_IESB_IMPL:
			sbuf_printf(sb, "%sIESB", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown IESB", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_LSM(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_LSM_NONE:
			break;
		case ID_AA64MMFR2_LSM_IMPL:
			sbuf_printf(sb, "%sLSM", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown LSM", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_UAO(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_UAO_NONE:
			break;
		case ID_AA64MMFR2_UAO_IMPL:
			sbuf_printf(sb, "%sUAO", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown UAO", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_CNP(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CNP_NONE:
			break;
		case ID_AA64MMFR2_CNP_IMPL:
			sbuf_printf(sb, "%sCnP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown CnP", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Debug Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
		printed = 0;
		sbuf_printf(sb, "             Debug Features 0 = <");
		switch (ID_AA64DFR0_PMS_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMS_VER_NONE:
			break;
		case ID_AA64DFR0_PMS_VER_V1:
			sbuf_printf(sb, "%sSPE v1", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SPE", SEP_STR);
			break;
		}

		sbuf_printf(sb, "%s%lu CTX Breakpoints", SEP_STR,
		    ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0));

		sbuf_printf(sb, "%s%lu Watchpoints", SEP_STR,
		    ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0));

		sbuf_printf(sb, "%s%lu Breakpoints", SEP_STR,
		    ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0));

		switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMU_VER_NONE:
			break;
		case ID_AA64DFR0_PMU_VER_3:
			sbuf_printf(sb, "%sPMUv3", SEP_STR);
			break;
		case ID_AA64DFR0_PMU_VER_3_1:
			sbuf_printf(sb, "%sPMUv3+16 bit evtCount", SEP_STR);
			break;
		case ID_AA64DFR0_PMU_VER_IMPL:
			sbuf_printf(sb, "%sImplementation defined PMU", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown PMU", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_TRACE_VER_NONE:
			break;
		case ID_AA64DFR0_TRACE_VER_IMPL:
			sbuf_printf(sb, "%sTrace", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Trace", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_DEBUG_VER_8:
			sbuf_printf(sb, "%sDebug v8", SEP_STR);
			break;
		case ID_AA64DFR0_DEBUG_VER_8_VHE:
			sbuf_printf(sb, "%sDebug v8+VHE", SEP_STR);
			break;
		case ID_AA64DFR0_DEBUG_VER_8_2:
			sbuf_printf(sb, "%sDebug v8.2", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Debug", SEP_STR);
			break;
		}

		if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Debug Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
		printf("             Debug Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64dfr1);
	}

	/* AArch64 Auxiliary Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
		printf("         Auxiliary Features 0 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr0);
	}

	/* AArch64 Auxiliary Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
		printf("         Auxiliary Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr1);
	}

	sbuf_delete(sb);
	sb = NULL;
#undef SEP_STR
}
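
/*
 * Called on each CPU as it starts: cache the identification registers
 * in cpu_desc[], record the MPIDR affinity, and on secondary CPUs note
 * the number of affinity levels in use and which registers differ from
 * CPU 0 so that print_cpu_features() reports them.
 */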
void
identify_cpu(void)
{
	u_int midr;
	u_int impl_id;
	u_int part_id;
	u_int cpu;
	size_t i;
	const struct cpu_parts *cpu_partsp = NULL;

	cpu = PCPU_GET(cpuid);
	midr = get_midr();

	/*
	 * Store midr to pcpu to allow fast reading
	 * from EL0, EL1 and assembly code.
	 */
	PCPU_SET(midr, midr);

	impl_id = CPU_IMPL(midr);
	for (i = 0; i < nitems(cpu_implementers); i++) {
		if (impl_id == cpu_implementers[i].impl_id ||
		    cpu_implementers[i].impl_id == 0) {
			cpu_desc[cpu].cpu_impl = impl_id;
			cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
			cpu_partsp = cpu_implementers[i].cpu_parts;
			break;
		}
	}

	part_id = CPU_PART(midr);
	/*
	 * The part table is terminated by the CPU_PART_NONE sentinel:
	 * its part_id of 0 always matches, so the loop below cannot run
	 * past the end of the array.
	 */
	for (i = 0; ; i++) {
		if (part_id == cpu_partsp[i].part_id ||
		    cpu_partsp[i].part_id == 0) {
			cpu_desc[cpu].cpu_part_num = part_id;
			cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
			break;
		}
	}

	cpu_desc[cpu].cpu_revision = CPU_REV(midr);
	cpu_desc[cpu].cpu_variant = CPU_VAR(midr);

	/* Save affinity for current CPU */
	cpu_desc[cpu].mpidr = get_mpidr();
	CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;

	cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1);
	cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1);
	cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1);
	cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1);
	cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1);
	cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
	cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1);
	cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1);
	cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1);

	if (cpu != 0) {
		/*
		 * This code must run on one CPU at a time, but the
		 * scheduler is not yet running on this core, so
		 * implement a simple spinlock: spin with WFE until the
		 * CPU holding the lock issues an SEV on release.
		 */
		while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
			__asm __volatile("wfe" ::: "memory");

		switch (cpu_aff_levels) {
		case 0:
			if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
			    CPU_AFF0(cpu_desc[0].mpidr))
				cpu_aff_levels = 1;
			/* FALLTHROUGH */
		case 1:
			if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
			    CPU_AFF1(cpu_desc[0].mpidr))
				cpu_aff_levels = 2;
			/* FALLTHROUGH */
		case 2:
			if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
			    CPU_AFF2(cpu_desc[0].mpidr))
				cpu_aff_levels = 3;
			/* FALLTHROUGH */
		case 3:
			if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
			    CPU_AFF3(cpu_desc[0].mpidr))
				cpu_aff_levels = 4;
			break;
		}

		if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
			cpu_print_regs |= PRINT_ID_AA64_AFR0;
		if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
			cpu_print_regs |= PRINT_ID_AA64_AFR1;

		if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
			cpu_print_regs |= PRINT_ID_AA64_DFR0;
		if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
			cpu_print_regs |= PRINT_ID_AA64_DFR1;

		if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
			cpu_print_regs |= PRINT_ID_AA64_ISAR0;
		if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
			cpu_print_regs |= PRINT_ID_AA64_ISAR1;

		if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
			cpu_print_regs |= PRINT_ID_AA64_MMFR0;
		if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
			cpu_print_regs |= PRINT_ID_AA64_MMFR1;
		if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
			cpu_print_regs |= PRINT_ID_AA64_MMFR2;

		if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
			cpu_print_regs |= PRINT_ID_AA64_PFR0;
		if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
			cpu_print_regs |= PRINT_ID_AA64_PFR1;

		/* Wake up the other CPUs */
		atomic_store_rel_int(&ident_lock, 0);
		__asm __volatile("sev" ::: "memory");
	}
}