/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Semihalf
 * under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/undefined.h>
#include <machine/elf.h>

static int ident_lock;
static void print_cpu_features(u_int cpu);
static u_long parse_cpu_features_hwcap(u_int cpu);

char machine[] = "arm64";

#ifdef SCTL_MASK32
extern int adaptive_machine_arch;
#endif

static int
sysctl_hw_machine(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
	static const char machine32[] = "arm";
#endif
	int error;
#ifdef SCTL_MASK32
	if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch)
		error = SYSCTL_OUT(req, machine32, sizeof(machine32));
	else
#endif
		error = SYSCTL_OUT(req, machine, sizeof(machine));
	return (error);
}

SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD |
	CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class");

static char cpu_model[64];
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD,
	cpu_model, sizeof(cpu_model), "Machine model");

/*
 * Per-CPU affinity as provided in MPIDR_EL1.
 * Indexed by CPU number in the logical order selected by the system.
 * Relevant fields can be extracted using the CPU_AFFn macros;
 * together, Aff3.Aff2.Aff1.Aff0 forms a unique CPU address in the system.
 *
 * Fields used by us:
 * Aff1 - Cluster number
 * Aff0 - CPU number within the Aff1 cluster
 */
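/*
 * Worked example (illustrative values): an MPIDR_EL1 of
 * 0x0000000000000102 has CPU_AFF1() == 1 and CPU_AFF0() == 2,
 * i.e. the third CPU of the second cluster, with Aff2 and Aff3 zero.
 */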
uint64_t __cpu_affinity[MAXCPU];
static u_int cpu_aff_levels;

struct cpu_desc {
	u_int		cpu_impl;
	u_int		cpu_part_num;
	u_int		cpu_variant;
	u_int		cpu_revision;
	const char	*cpu_impl_name;
	const char	*cpu_part_name;

	uint64_t	mpidr;
	uint64_t	id_aa64afr0;
	uint64_t	id_aa64afr1;
	uint64_t	id_aa64dfr0;
	uint64_t	id_aa64dfr1;
	uint64_t	id_aa64isar0;
	uint64_t	id_aa64isar1;
	uint64_t	id_aa64mmfr0;
	uint64_t	id_aa64mmfr1;
	uint64_t	id_aa64mmfr2;
	uint64_t	id_aa64pfr0;
	uint64_t	id_aa64pfr1;
};

struct cpu_desc cpu_desc[MAXCPU];
struct cpu_desc user_cpu_desc;
static u_int cpu_print_regs;
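/*
 * Bits in cpu_print_regs select which ID registers to print for
 * secondary CPUs; a bit is set when that register differs from the
 * value read on CPU 0 (see identify_cpu() below).
 */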
#define	PRINT_ID_AA64_AFR0	0x00000001
#define	PRINT_ID_AA64_AFR1	0x00000002
#define	PRINT_ID_AA64_DFR0	0x00000010
#define	PRINT_ID_AA64_DFR1	0x00000020
#define	PRINT_ID_AA64_ISAR0	0x00000100
#define	PRINT_ID_AA64_ISAR1	0x00000200
#define	PRINT_ID_AA64_MMFR0	0x00001000
#define	PRINT_ID_AA64_MMFR1	0x00002000
#define	PRINT_ID_AA64_MMFR2	0x00004000
#define	PRINT_ID_AA64_PFR0	0x00010000
#define	PRINT_ID_AA64_PFR1	0x00020000

struct cpu_parts {
	u_int		part_id;
	const char	*part_name;
};
#define	CPU_PART_NONE	{ 0, "Unknown Processor" }
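/*
 * CPU_PART_NONE doubles as the table sentinel: its part_id of 0 matches
 * any part number not listed earlier, so lookups always terminate with
 * at worst an "Unknown Processor" name.
 */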

struct cpu_implementers {
	u_int			impl_id;
	const char		*impl_name;
	/*
	 * Part numbers are implementation defined, so each vendor has
	 * its own set of values and names.
	 */
	const struct cpu_parts	*cpu_parts;
};
#define	CPU_IMPLEMENTER_NONE	{ 0, "Unknown Implementer", cpu_parts_none }

/*
 * Per-implementer table of (PartNum, CPU Name) pairs.
 */
/* ARM Ltd. */
static const struct cpu_parts cpu_parts_arm[] = {
	{ CPU_PART_FOUNDATION, "Foundation-Model" },
	{ CPU_PART_CORTEX_A35, "Cortex-A35" },
	{ CPU_PART_CORTEX_A53, "Cortex-A53" },
	{ CPU_PART_CORTEX_A55, "Cortex-A55" },
	{ CPU_PART_CORTEX_A57, "Cortex-A57" },
	{ CPU_PART_CORTEX_A72, "Cortex-A72" },
	{ CPU_PART_CORTEX_A73, "Cortex-A73" },
	{ CPU_PART_CORTEX_A75, "Cortex-A75" },
	CPU_PART_NONE,
};
/* Cavium */
static const struct cpu_parts cpu_parts_cavium[] = {
	{ CPU_PART_THUNDERX, "ThunderX" },
	{ CPU_PART_THUNDERX2, "ThunderX2" },
	CPU_PART_NONE,
};

/* APM / Ampere */
static const struct cpu_parts cpu_parts_apm[] = {
	{ CPU_PART_EMAG8180, "eMAG 8180" },
	CPU_PART_NONE,
};

/* Unknown */
static const struct cpu_parts cpu_parts_none[] = {
	CPU_PART_NONE,
};

/*
 * Implementers table.
 */
const struct cpu_implementers cpu_implementers[] = {
	{ CPU_IMPL_ARM,		"ARM",		cpu_parts_arm },
	{ CPU_IMPL_BROADCOM,	"Broadcom",	cpu_parts_none },
	{ CPU_IMPL_CAVIUM,	"Cavium",	cpu_parts_cavium },
	{ CPU_IMPL_DEC,		"DEC",		cpu_parts_none },
	{ CPU_IMPL_INFINEON,	"IFX",		cpu_parts_none },
	{ CPU_IMPL_FREESCALE,	"Freescale",	cpu_parts_none },
	{ CPU_IMPL_NVIDIA,	"NVIDIA",	cpu_parts_none },
	{ CPU_IMPL_APM,		"APM",		cpu_parts_apm },
	{ CPU_IMPL_QUALCOMM,	"Qualcomm",	cpu_parts_none },
	{ CPU_IMPL_MARVELL,	"Marvell",	cpu_parts_none },
	{ CPU_IMPL_INTEL,	"Intel",	cpu_parts_none },
	CPU_IMPLEMENTER_NONE,
};

#define	MRS_TYPE_MASK		0xf
#define	MRS_INVALID		0
#define	MRS_EXACT		1
#define	MRS_EXACT_VAL(x)	(MRS_EXACT | ((x) << 4))
#define	MRS_EXACT_FIELD(x)	((x) >> 4)
#define	MRS_LOWER		2
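/*
 * Field handling for the user-visible register values: MRS_EXACT
 * forces the field to a fixed value, either 0 or the value packed in
 * via MRS_EXACT_VAL(), while MRS_LOWER exposes the lowest value seen
 * across all CPUs, compared as signed or unsigned according to the
 * field's sign flag (see update_user_regs() below).
 */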

struct mrs_field {
	bool		sign;
	u_int		type;
	u_int		shift;
};

#define	MRS_FIELD(_sign, _type, _shift)					\
	{								\
		.sign = (_sign),					\
		.type = (_type),					\
		.shift = (_shift),					\
	}

#define	MRS_FIELD_END	{ .type = MRS_INVALID, }

static struct mrs_field id_aa64isar0_fields[] = {
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_DP_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM4_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM3_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA3_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_RDM_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_Atomic_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_CRC32_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA2_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA1_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_AES_SHIFT),
	MRS_FIELD_END,
};

static struct mrs_field id_aa64isar1_fields[] = {
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_GPI_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_GPA_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_LRCPC_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_FCMA_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_JSCVT_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_API_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_APA_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_DPB_SHIFT),
	MRS_FIELD_END,
};

static struct mrs_field id_aa64pfr0_fields[] = {
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_SVE_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_RAS_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_GIC_SHIFT),
	MRS_FIELD(true,  MRS_LOWER, ID_AA64PFR0_AdvSIMD_SHIFT),
	MRS_FIELD(true,  MRS_LOWER, ID_AA64PFR0_FP_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL3_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL2_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL1_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL0_SHIFT),
	MRS_FIELD_END,
};

static struct mrs_field id_aa64dfr0_fields[] = {
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMSVer_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_CTX_CMPs_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_WRPs_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_BRPs_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMUVer_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_TraceVer_SHIFT),
	MRS_FIELD(false, MRS_EXACT_VAL(0x6), ID_AA64DFR0_DebugVer_SHIFT),
	MRS_FIELD_END,
};

struct mrs_user_reg {
	u_int		CRm;
	u_int		Op2;
	size_t		offset;
	struct mrs_field *fields;
};

static struct mrs_user_reg user_regs[] = {
	{	/* id_aa64isar0_el1 */
		.CRm = 6,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64isar0),
		.fields = id_aa64isar0_fields,
	},
	{	/* id_aa64isar1_el1 */
		.CRm = 6,
		.Op2 = 1,
		.offset = __offsetof(struct cpu_desc, id_aa64isar1),
		.fields = id_aa64isar1_fields,
	},
	{	/* id_aa64pfr0_el1 */
		.CRm = 4,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64pfr0),
		.fields = id_aa64pfr0_fields,
	},
	{	/* id_aa64dfr0_el1 */
		.CRm = 5,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64dfr0),
		.fields = id_aa64dfr0_fields,
	},
};

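/*
 * CPU_DESC_FIELD() reads the 64-bit ID register value stored in a
 * struct cpu_desc at the offset recorded for user_regs entry idx.
 */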
#define	CPU_DESC_FIELD(desc, idx)					\
    *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset)

static int
user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
    uint32_t esr)
{
	uint64_t value;
	int CRm, Op2, i, reg;

	if ((insn & MRS_MASK) != MRS_VALUE)
		return (0);

	/*
	 * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
	 * These are in the EL1 CPU identification space.
	 * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVIDR_EL1.
	 * CRm == {4-7} holds the ID_AA64 registers.
	 *
	 * For full details see the ARMv8 ARM (ARM DDI 0487C.a),
	 * Table D9-2, "System instruction encodings for non-Debug System
	 * register accesses".
	 */
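	/*
	 * For example, an EL0 read like the following (illustrative
	 * snippet; older assemblers may require the encoded
	 * s3_0_c0_c6_0 form of the register name) is UNDEFINED at EL0,
	 * traps to the kernel, and is satisfied here from
	 * user_cpu_desc:
	 *
	 *	uint64_t isar0;
	 *	__asm __volatile("mrs %0, id_aa64isar0_el1" : "=r" (isar0));
	 */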
	if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
		return (0);

	CRm = mrs_CRm(insn);
	if (CRm > 7 || (CRm < 4 && CRm != 0))
		return (0);

	Op2 = mrs_Op2(insn);
	value = 0;

	for (i = 0; i < nitems(user_regs); i++) {
		if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
			value = CPU_DESC_FIELD(user_cpu_desc, i);
			break;
		}
	}

	if (CRm == 0) {
		switch (Op2) {
		case 0:
			value = READ_SPECIALREG(midr_el1);
			break;
		case 5:
			value = READ_SPECIALREG(mpidr_el1);
			break;
		case 6:
			value = READ_SPECIALREG(revidr_el1);
			break;
		default:
			return (0);
		}
	}

	/*
	 * We will handle this instruction; move on to the next one so
	 * we don't trap here again.
	 */
	frame->tf_elr += INSN_SIZE;

	reg = MRS_REGISTER(insn);
	/* If reg is 31 then write to xzr, i.e. do nothing */
	if (reg == 31)
		return (1);

	if (reg < nitems(frame->tf_x))
		frame->tf_x[reg] = value;
	else if (reg == 30)
		frame->tf_lr = value;

	return (1);
}

static void
update_user_regs(u_int cpu)
{
	struct mrs_field *fields;
	uint64_t cur, value;
	int i, j, cur_field, new_field;

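	/*
	 * Merge this CPU's view into user_cpu_desc. For example, if
	 * CPU 0 reports ISAR0.AES = 2 (AES + PMULL) but a later CPU
	 * reports ISAR0.AES = 1 (AES only), the MRS_LOWER rule drops
	 * the user-visible field to 1 so userland never sees a feature
	 * some CPU lacks.
	 */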
	for (i = 0; i < nitems(user_regs); i++) {
		value = CPU_DESC_FIELD(cpu_desc[cpu], i);
		if (cpu == 0)
			cur = value;
		else
			cur = CPU_DESC_FIELD(user_cpu_desc, i);

		fields = user_regs[i].fields;
		for (j = 0; fields[j].type != 0; j++) {
			switch (fields[j].type & MRS_TYPE_MASK) {
			case MRS_EXACT:
				cur &= ~(0xful << fields[j].shift);
				cur |=
				    (uint64_t)MRS_EXACT_FIELD(fields[j].type) <<
				    fields[j].shift;
				break;
			case MRS_LOWER:
				new_field = (value >> fields[j].shift) & 0xf;
				cur_field = (cur >> fields[j].shift) & 0xf;
				if ((fields[j].sign &&
				     (int)new_field < (int)cur_field) ||
				    (!fields[j].sign &&
				     (u_int)new_field < (u_int)cur_field)) {
					cur &= ~(0xful << fields[j].shift);
					cur |= (uint64_t)new_field <<
					    fields[j].shift;
				}
				break;
			default:
				panic("Invalid field type: %d", fields[j].type);
			}
		}

		CPU_DESC_FIELD(user_cpu_desc, i) = cur;
	}
}

/* HWCAP */
extern u_long elf_hwcap;

static void
identify_cpu_sysinit(void *dummy __unused)
{
	int cpu;
	u_long hwcap;

	/* Create a user-visible cpu description with safe values */
	memset(&user_cpu_desc, 0, sizeof(user_cpu_desc));
	/* Safe values for these registers */
	user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_AdvSIMD_NONE |
	    ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64;
	user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DebugVer_8;

	CPU_FOREACH(cpu) {
		print_cpu_features(cpu);
		hwcap = parse_cpu_features_hwcap(cpu);
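		/*
		 * Keep only the HWCAP bits every CPU implements so a
		 * thread migrating between heterogeneous cores never
		 * sees a feature disappear.
		 */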
		if (elf_hwcap == 0)
			elf_hwcap = hwcap;
		else
			elf_hwcap &= hwcap;
		update_user_regs(cpu);
	}

	install_undef_handler(true, user_mrs_handler);
}
SYSINIT(identify_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);

static u_long
parse_cpu_features_hwcap(u_int cpu)
{
	u_long hwcap = 0;

	if (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_DP_IMPL)
		hwcap |= HWCAP_ASIMDDP;

	if (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM4_IMPL)
		hwcap |= HWCAP_SM4;

	if (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM3_IMPL)
		hwcap |= HWCAP_SM3;

	if (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_RDM_IMPL)
		hwcap |= HWCAP_ASIMDRDM;

	if (ID_AA64ISAR0_Atomic(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_Atomic_IMPL)
		hwcap |= HWCAP_ATOMICS;

	if (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_CRC32_BASE)
		hwcap |= HWCAP_CRC32;

	switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
	case ID_AA64ISAR0_SHA2_BASE:
		hwcap |= HWCAP_SHA2;
		break;
	case ID_AA64ISAR0_SHA2_512:
		hwcap |= HWCAP_SHA2 | HWCAP_SHA512;
		break;
	default:
		break;
	}

	if (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0))
		hwcap |= HWCAP_SHA1;

	switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
	case ID_AA64ISAR0_AES_BASE:
		hwcap |= HWCAP_AES;
		break;
	case ID_AA64ISAR0_AES_PMULL:
		hwcap |= HWCAP_PMULL | HWCAP_AES;
		break;
	default:
		break;
	}

	if (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_LRCPC_IMPL)
		hwcap |= HWCAP_LRCPC;

	if (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_FCMA_IMPL)
		hwcap |= HWCAP_FCMA;

	if (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_JSCVT_IMPL)
		hwcap |= HWCAP_JSCVT;

	if (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_DPB_IMPL)
		hwcap |= HWCAP_DCPOP;

	if (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0) == ID_AA64PFR0_SVE_IMPL)
		hwcap |= HWCAP_SVE;

	switch (ID_AA64PFR0_AdvSIMD(cpu_desc[cpu].id_aa64pfr0)) {
	case ID_AA64PFR0_AdvSIMD_IMPL:
		hwcap |= HWCAP_ASIMD;
		break;
	case ID_AA64PFR0_AdvSIMD_HP:
		hwcap |= HWCAP_ASIMD | HWCAP_ASIMDHP;
		break;
	default:
		break;
	}

	switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
	case ID_AA64PFR0_FP_IMPL:
		hwcap |= HWCAP_FP;
		break;
	case ID_AA64PFR0_FP_HP:
		hwcap |= HWCAP_FP | HWCAP_FPHP;
		break;
	default:
		break;
	}

	return (hwcap);
}

static void
print_cpu_features(u_int cpu)
{
	struct sbuf *sb;
	int printed;

	sb = sbuf_new_auto();
	sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
	    cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
	    cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);

	sbuf_cat(sb, " affinity:");
	switch (cpu_aff_levels) {
	default:
	case 4:
		sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 3:
		sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 2:
		sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 1:
	case 0: /* On UP this will be zero */
		sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
		break;
	}
	sbuf_finish(sb);
	printf("%s\n", sbuf_data(sb));
	sbuf_clear(sb);

	/*
	 * There is a hardware erratum where, if one CPU is performing a TLB
	 * invalidation while another is performing a store-exclusive, the
	 * store-exclusive may return the wrong status. A workaround seems
	 * to be to use an IPI to invalidate on each CPU, however given the
	 * limited number of affected units (pass 1.1 is the evaluation
	 * hardware revision) and the lack of information from Cavium,
	 * this has not been implemented.
	 *
	 * At the time of writing, the only information is from:
	 * https://lkml.org/lkml/2016/8/4/722
	 */
	/*
	 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
	 * triggers on pass 2.0+.
	 */
	if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
	    CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
		printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
		    "hardware bugs that may cause atomic operations to "
		    "behave incorrectly.\n");

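/*
 * Emit nothing before the first feature printed into the sbuf and a
 * comma before each subsequent one; relies on the local "printed"
 * counter being reset to 0 before each register's feature list.
 */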
#define	SEP_STR	(((printed++) == 0) ? "" : ",")

	/* AArch64 Instruction Set Attribute Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
		printed = 0;
		sbuf_printf(sb, " Instruction Set Attributes 0 = <");

		switch (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_DP_NONE:
			break;
		case ID_AA64ISAR0_DP_IMPL:
			sbuf_printf(sb, "%sDotProd", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown DP", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM4_NONE:
			break;
		case ID_AA64ISAR0_SM4_IMPL:
			sbuf_printf(sb, "%sSM4", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SM4", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM3_NONE:
			break;
		case ID_AA64ISAR0_SM3_IMPL:
			sbuf_printf(sb, "%sSM3", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SM3", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA3_NONE:
			break;
		case ID_AA64ISAR0_SHA3_IMPL:
			sbuf_printf(sb, "%sSHA3", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SHA3", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_RDM_NONE:
			break;
		case ID_AA64ISAR0_RDM_IMPL:
			sbuf_printf(sb, "%sRDM", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown RDM", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_Atomic(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_Atomic_NONE:
			break;
		case ID_AA64ISAR0_Atomic_IMPL:
			sbuf_printf(sb, "%sAtomic", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Atomic", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_CRC32_NONE:
			break;
		case ID_AA64ISAR0_CRC32_BASE:
			sbuf_printf(sb, "%sCRC32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown CRC32", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA2_NONE:
			break;
		case ID_AA64ISAR0_SHA2_BASE:
			sbuf_printf(sb, "%sSHA2", SEP_STR);
			break;
		case ID_AA64ISAR0_SHA2_512:
			sbuf_printf(sb, "%sSHA2+SHA512", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SHA2", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA1_NONE:
			break;
		case ID_AA64ISAR0_SHA1_BASE:
			sbuf_printf(sb, "%sSHA1", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SHA1", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_AES_NONE:
			break;
		case ID_AA64ISAR0_AES_BASE:
			sbuf_printf(sb, "%sAES", SEP_STR);
			break;
		case ID_AA64ISAR0_AES_PMULL:
			sbuf_printf(sb, "%sAES+PMULL", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown AES", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);

		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Instruction Set Attribute Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
		printed = 0;
		sbuf_printf(sb, " Instruction Set Attributes 1 = <");

		switch (ID_AA64ISAR1_GPI(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPI_NONE:
			break;
		case ID_AA64ISAR1_GPI_IMPL:
			sbuf_printf(sb, "%sImpl GenericAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_GPA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPA_NONE:
			break;
		case ID_AA64ISAR1_GPA_IMPL:
			sbuf_printf(sb, "%sPrince GenericAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_LRCPC_NONE:
			break;
		case ID_AA64ISAR1_LRCPC_IMPL:
			sbuf_printf(sb, "%sRCpc", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown RCpc", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_FCMA_NONE:
			break;
		case ID_AA64ISAR1_FCMA_IMPL:
			sbuf_printf(sb, "%sFCMA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown FCMA", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_JSCVT_NONE:
			break;
		case ID_AA64ISAR1_JSCVT_IMPL:
			sbuf_printf(sb, "%sJS Conv", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown JS Conv", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_API(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_API_NONE:
			break;
		case ID_AA64ISAR1_API_IMPL:
			sbuf_printf(sb, "%sImpl AddrAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Impl AddrAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_APA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_APA_NONE:
			break;
		case ID_AA64ISAR1_APA_IMPL:
			sbuf_printf(sb, "%sPrince AddrAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Prince AddrAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_DPB_NONE:
			break;
		case ID_AA64ISAR1_DPB_IMPL:
			sbuf_printf(sb, "%sDC CVAP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown DC CVAP", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Processor Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
		printed = 0;
		sbuf_printf(sb, "         Processor Features 0 = <");

		switch (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_SVE_NONE:
			break;
		case ID_AA64PFR0_SVE_IMPL:
			sbuf_printf(sb, "%sSVE", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SVE", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_RAS(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_RAS_NONE:
			break;
		case ID_AA64PFR0_RAS_V1:
			sbuf_printf(sb, "%sRASv1", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown RAS", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_GIC_CPUIF_NONE:
			break;
		case ID_AA64PFR0_GIC_CPUIF_EN:
			sbuf_printf(sb, "%sGIC", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown GIC interface", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_AdvSIMD(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_AdvSIMD_NONE:
			break;
		case ID_AA64PFR0_AdvSIMD_IMPL:
			sbuf_printf(sb, "%sAdvSIMD", SEP_STR);
			break;
		case ID_AA64PFR0_AdvSIMD_HP:
			sbuf_printf(sb, "%sAdvSIMD+HP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown AdvSIMD", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_FP_NONE:
			break;
		case ID_AA64PFR0_FP_IMPL:
			sbuf_printf(sb, "%sFloat", SEP_STR);
			break;
		case ID_AA64PFR0_FP_HP:
			sbuf_printf(sb, "%sFloat+HP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Float", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL3_NONE:
			sbuf_printf(sb, "%sNo EL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64:
			sbuf_printf(sb, "%sEL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64_32:
			sbuf_printf(sb, "%sEL3 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL3", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL2_NONE:
			sbuf_printf(sb, "%sNo EL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64:
			sbuf_printf(sb, "%sEL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64_32:
			sbuf_printf(sb, "%sEL2 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL2", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL1_64:
			sbuf_printf(sb, "%sEL1", SEP_STR);
			break;
		case ID_AA64PFR0_EL1_64_32:
			sbuf_printf(sb, "%sEL1 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL1", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL0_64:
			sbuf_printf(sb, "%sEL0", SEP_STR);
			break;
		case ID_AA64PFR0_EL0_64_32:
			sbuf_printf(sb, "%sEL0 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL0", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);

		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Processor Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
		printf("         Processor Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64pfr1);
	}

	/* AArch64 Memory Model Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
		printed = 0;
		sbuf_printf(sb, "      Memory Model Features 0 = <");
		switch (ID_AA64MMFR0_TGran4(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGran4_NONE:
			break;
		case ID_AA64MMFR0_TGran4_IMPL:
			sbuf_printf(sb, "%s4k Granule", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown 4k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGran64(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGran64_NONE:
			break;
		case ID_AA64MMFR0_TGran64_IMPL:
			sbuf_printf(sb, "%s64k Granule", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown 64k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGran16(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGran16_NONE:
			break;
		case ID_AA64MMFR0_TGran16_IMPL:
			sbuf_printf(sb, "%s16k Granule", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown 16k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BigEndEL0(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BigEndEL0_FIXED:
			break;
		case ID_AA64MMFR0_BigEndEL0_MIXED:
			sbuf_printf(sb, "%sEL0 MixEndian", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL0 Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_SNSMem(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_SNSMem_NONE:
			break;
		case ID_AA64MMFR0_SNSMem_DISTINCT:
			sbuf_printf(sb, "%sS/NS Mem", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown S/NS Mem", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BigEnd(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BigEnd_FIXED:
			break;
		case ID_AA64MMFR0_BigEnd_MIXED:
			sbuf_printf(sb, "%sMixedEndian", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_ASIDBits(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_ASIDBits_8:
			sbuf_printf(sb, "%s8bit ASID", SEP_STR);
			break;
		case ID_AA64MMFR0_ASIDBits_16:
			sbuf_printf(sb, "%s16bit ASID", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown ASID", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_PARange(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_PARange_4G:
			sbuf_printf(sb, "%s4GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PARange_64G:
			sbuf_printf(sb, "%s64GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PARange_1T:
			sbuf_printf(sb, "%s1TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PARange_4T:
			sbuf_printf(sb, "%s4TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PARange_16T:
			sbuf_printf(sb, "%s16TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PARange_256T:
			sbuf_printf(sb, "%s256TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PARange_4P:
			sbuf_printf(sb, "%s4PB PA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown PA Range", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Memory Model Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
		printed = 0;
		sbuf_printf(sb, "      Memory Model Features 1 = <");

		switch (ID_AA64MMFR1_XNX(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_XNX_NONE:
			break;
		case ID_AA64MMFR1_XNX_IMPL:
			sbuf_printf(sb, "%sEL2 XN", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown XNX", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_SpecSEI(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_SpecSEI_NONE:
			break;
		case ID_AA64MMFR1_SpecSEI_IMPL:
			sbuf_printf(sb, "%sSpecSEI", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SpecSEI", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_PAN_NONE:
			break;
		case ID_AA64MMFR1_PAN_IMPL:
			sbuf_printf(sb, "%sPAN", SEP_STR);
			break;
		case ID_AA64MMFR1_PAN_ATS1E1:
			sbuf_printf(sb, "%sPAN+AT", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown PAN", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_LO_NONE:
			break;
		case ID_AA64MMFR1_LO_IMPL:
			sbuf_printf(sb, "%sLO", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown LO", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HPDS_NONE:
			break;
		case ID_AA64MMFR1_HPDS_HPD:
			sbuf_printf(sb, "%sHPDS", SEP_STR);
			break;
		case ID_AA64MMFR1_HPDS_TTPBHA:
			sbuf_printf(sb, "%sTTPBHA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown HPDS", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VH_NONE:
			break;
		case ID_AA64MMFR1_VH_IMPL:
			sbuf_printf(sb, "%sVHE", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown VHE", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VMIDBits(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VMIDBits_8:
			break;
		case ID_AA64MMFR1_VMIDBits_16:
			sbuf_printf(sb, "%s16 VMID bits", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown VMID bits", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HAFDBS_NONE:
			break;
		case ID_AA64MMFR1_HAFDBS_AF:
			sbuf_printf(sb, "%sAF", SEP_STR);
			break;
		case ID_AA64MMFR1_HAFDBS_AF_DBS:
			sbuf_printf(sb, "%sAF+DBS", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Hardware update AF/DBS", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Memory Model Feature Register 2 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) {
		printed = 0;
		sbuf_printf(sb, "      Memory Model Features 2 = <");

		switch (ID_AA64MMFR2_NV(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_NV_NONE:
			break;
		case ID_AA64MMFR2_NV_IMPL:
			sbuf_printf(sb, "%sNestedVirt", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown NestedVirt", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_CCIDX(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CCIDX_32:
			sbuf_printf(sb, "%s32b CCIDX", SEP_STR);
			break;
		case ID_AA64MMFR2_CCIDX_64:
			sbuf_printf(sb, "%s64b CCIDX", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown CCIDX", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_VARange(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_VARange_48:
			sbuf_printf(sb, "%s48b VA", SEP_STR);
			break;
		case ID_AA64MMFR2_VARange_52:
			sbuf_printf(sb, "%s52b VA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown VA Range", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_IESB(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_IESB_NONE:
			break;
		case ID_AA64MMFR2_IESB_IMPL:
			sbuf_printf(sb, "%sIESB", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown IESB", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_LSM(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_LSM_NONE:
			break;
		case ID_AA64MMFR2_LSM_IMPL:
			sbuf_printf(sb, "%sLSM", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown LSM", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_UAO(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_UAO_NONE:
			break;
		case ID_AA64MMFR2_UAO_IMPL:
			sbuf_printf(sb, "%sUAO", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown UAO", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_CnP(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CnP_NONE:
			break;
		case ID_AA64MMFR2_CnP_IMPL:
			sbuf_printf(sb, "%sCnP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown CnP", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Debug Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
		printed = 0;
		sbuf_printf(sb, "             Debug Features 0 = <");
		switch (ID_AA64DFR0_PMSVer(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMSVer_NONE:
			break;
		case ID_AA64DFR0_PMSVer_V1:
			sbuf_printf(sb, "%sSPE v1", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SPE", SEP_STR);
			break;
		}

		sbuf_printf(sb, "%s%lu CTX Breakpoints", SEP_STR,
		    ID_AA64DFR0_CTX_CMPs(cpu_desc[cpu].id_aa64dfr0));

		sbuf_printf(sb, "%s%lu Watchpoints", SEP_STR,
		    ID_AA64DFR0_WRPs(cpu_desc[cpu].id_aa64dfr0));

		sbuf_printf(sb, "%s%lu Breakpoints", SEP_STR,
		    ID_AA64DFR0_BRPs(cpu_desc[cpu].id_aa64dfr0));

		switch (ID_AA64DFR0_PMUVer(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMUVer_NONE:
			break;
		case ID_AA64DFR0_PMUVer_3:
			sbuf_printf(sb, "%sPMUv3", SEP_STR);
			break;
		case ID_AA64DFR0_PMUVer_3_1:
			sbuf_printf(sb, "%sPMUv3+16 bit evtCount", SEP_STR);
			break;
		case ID_AA64DFR0_PMUVer_IMPL:
			sbuf_printf(sb, "%sImplementation defined PMU", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown PMU", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_TraceVer(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_TraceVer_NONE:
			break;
		case ID_AA64DFR0_TraceVer_IMPL:
			sbuf_printf(sb, "%sTrace", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Trace", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_DebugVer(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_DebugVer_8:
			sbuf_printf(sb, "%sDebug v8", SEP_STR);
			break;
		case ID_AA64DFR0_DebugVer_8_VHE:
			sbuf_printf(sb, "%sDebug v8+VHE", SEP_STR);
			break;
		case ID_AA64DFR0_DebugVer_8_2:
			sbuf_printf(sb, "%sDebug v8.2", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Debug", SEP_STR);
			break;
		}

		if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);
		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Debug Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
		printf("             Debug Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64dfr1);
	}

	/* AArch64 Auxiliary Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
		printf("         Auxiliary Features 0 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr0);
	}

	/* AArch64 Auxiliary Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
		printf("         Auxiliary Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr1);
	}

	sbuf_delete(sb);
	sb = NULL;
#undef SEP_STR
}

void
identify_cpu(void)
{
	u_int midr;
	u_int impl_id;
	u_int part_id;
	u_int cpu;
	size_t i;
	const struct cpu_parts *cpu_partsp = NULL;

	cpu = PCPU_GET(cpuid);
	midr = get_midr();

	/*
	 * Store midr in the pcpu area to allow fast reading from EL0,
	 * EL1, and assembly code.
	 */
	PCPU_SET(midr, midr);

	impl_id = CPU_IMPL(midr);
	for (i = 0; i < nitems(cpu_implementers); i++) {
		if (impl_id == cpu_implementers[i].impl_id ||
		    cpu_implementers[i].impl_id == 0) {
			cpu_desc[cpu].cpu_impl = impl_id;
			cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
			cpu_partsp = cpu_implementers[i].cpu_parts;
			break;
		}
	}

	part_id = CPU_PART(midr);
	for (i = 0; ; i++) {
		if (part_id == cpu_partsp[i].part_id ||
		    cpu_partsp[i].part_id == 0) {
			cpu_desc[cpu].cpu_part_num = part_id;
			cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
			break;
		}
	}

	cpu_desc[cpu].cpu_revision = CPU_REV(midr);
	cpu_desc[cpu].cpu_variant = CPU_VAR(midr);

	snprintf(cpu_model, sizeof(cpu_model), "%s %s r%dp%d",
	    cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
	    cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);

	/* Save affinity for current CPU */
	cpu_desc[cpu].mpidr = get_mpidr();
	CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;

	cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1);
	cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1);
	cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1);
	cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1);
	cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1);
	cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
	cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1);
	cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1);
	cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1);

	if (cpu != 0) {
		/*
		 * This code must run on one CPU at a time, but we are
		 * not scheduling on the current core yet, so implement
		 * a simple spinlock.
		 */
		while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
			__asm __volatile("wfe" ::: "memory");
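		/*
		 * The matching "sev" in the unlock path below is what
		 * wakes the cores parked in the "wfe" above.
		 */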

		switch (cpu_aff_levels) {
		case 0:
			if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
			    CPU_AFF0(cpu_desc[0].mpidr))
				cpu_aff_levels = 1;
			/* FALLTHROUGH */
		case 1:
			if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
			    CPU_AFF1(cpu_desc[0].mpidr))
				cpu_aff_levels = 2;
			/* FALLTHROUGH */
		case 2:
			if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
			    CPU_AFF2(cpu_desc[0].mpidr))
				cpu_aff_levels = 3;
			/* FALLTHROUGH */
		case 3:
			if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
			    CPU_AFF3(cpu_desc[0].mpidr))
				cpu_aff_levels = 4;
			break;
		}

		if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
			cpu_print_regs |= PRINT_ID_AA64_AFR0;
		if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
			cpu_print_regs |= PRINT_ID_AA64_AFR1;

		if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
			cpu_print_regs |= PRINT_ID_AA64_DFR0;
		if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
			cpu_print_regs |= PRINT_ID_AA64_DFR1;

		if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
			cpu_print_regs |= PRINT_ID_AA64_ISAR0;
		if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
			cpu_print_regs |= PRINT_ID_AA64_ISAR1;

		if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
			cpu_print_regs |= PRINT_ID_AA64_MMFR0;
		if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
			cpu_print_regs |= PRINT_ID_AA64_MMFR1;
		if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
			cpu_print_regs |= PRINT_ID_AA64_MMFR2;

		if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
			cpu_print_regs |= PRINT_ID_AA64_PFR0;
		if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
			cpu_print_regs |= PRINT_ID_AA64_PFR1;

		/* Wake up the other CPUs */
		atomic_store_rel_int(&ident_lock, 0);
		__asm __volatile("sev" ::: "memory");
	}
}