xref: /freebsd/sys/arm64/arm64/identcpu.c (revision e17f5b1d)
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * Copyright (c) 2014 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Semihalf
7  * under sponsorship of the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/pcpu.h>
38 #include <sys/sbuf.h>
39 #include <sys/smp.h>
40 #include <sys/sysctl.h>
41 #include <sys/systm.h>
42 
43 #include <machine/atomic.h>
44 #include <machine/cpu.h>
45 #include <machine/cpufunc.h>
46 #include <machine/undefined.h>
47 #include <machine/elf.h>
48 
/* Forward declarations; implementations appear later in this file. */
static void print_cpu_features(u_int cpu);
static u_long parse_cpu_features_hwcap(u_int cpu);

/* Architecture name exported via the hw.machine sysctl. */
char machine[] = "arm64";

#ifdef SCTL_MASK32
/* When set, 32-bit compat requests are shown a 32-bit machine arch. */
extern int adaptive_machine_arch;
#endif

static SYSCTL_NODE(_machdep, OID_AUTO, cache, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Cache management tuning");

/* Tunable: allow optimizations keyed off the CTR_EL0 DIC bit. */
static int allow_dic = 1;
SYSCTL_INT(_machdep_cache, OID_AUTO, allow_dic, CTLFLAG_RDTUN, &allow_dic, 0,
    "Allow optimizations based on the DIC cache bit");

/* Tunable: allow optimizations keyed off the CTR_EL0 IDC bit. */
static int allow_idc = 1;
SYSCTL_INT(_machdep_cache, OID_AUTO, allow_idc, CTLFLAG_RDTUN, &allow_idc, 0,
    "Allow optimizations based on the IDC cache bit");

static void check_cpu_regs(u_int cpu);

/*
 * The default implementation of I-cache sync assumes we have an
 * aliasing cache until we know otherwise.
 */
void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t) =
    &arm64_aliasing_icache_sync_range;
77 
78 static int
79 sysctl_hw_machine(SYSCTL_HANDLER_ARGS)
80 {
81 #ifdef SCTL_MASK32
82 	static const char machine32[] = "arm";
83 #endif
84 	int error;
85 #ifdef SCTL_MASK32
86 	if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch)
87 		error = SYSCTL_OUT(req, machine32, sizeof(machine32));
88 	else
89 #endif
90 		error = SYSCTL_OUT(req, machine, sizeof(machine));
91 	return (error);
92 }
93 
/* hw.machine is procedural so 32-bit compat requests can be special-cased. */
SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD |
	CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class");

/* hw.model string, filled in with the implementer/part name at boot. */
static char cpu_model[64];
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD,
	cpu_model, sizeof(cpu_model), "Machine model");
100 
/*
 * Per-CPU affinity as provided in MPIDR_EL1
 * Indexed by CPU number in logical order selected by the system.
 * Relevant fields can be extracted using CPU_AFFn macros,
 * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system.
 *
 * Fields used by us:
 * Aff1 - Cluster number
 * Aff0 - CPU number in Aff1 cluster
 */
uint64_t __cpu_affinity[MAXCPU];
static u_int cpu_aff_levels;	/* Number of affinity levels in use. */
113 
/*
 * Cached identification state for one CPU: the decoded MIDR_EL1 fields,
 * the resolved implementer/part name strings, and raw copies of the
 * AArch64 ID registers read on that CPU.
 */
struct cpu_desc {
	u_int		cpu_impl;	/* MIDR_EL1 implementer code. */
	u_int		cpu_part_num;	/* MIDR_EL1 part number. */
	u_int		cpu_variant;	/* MIDR_EL1 variant. */
	u_int		cpu_revision;	/* MIDR_EL1 revision. */
	const char	*cpu_impl_name;	/* From cpu_implementers[]. */
	const char	*cpu_part_name;	/* From the implementer's part table. */

	uint64_t	mpidr;
	uint64_t	id_aa64afr0;
	uint64_t	id_aa64afr1;
	uint64_t	id_aa64dfr0;
	uint64_t	id_aa64dfr1;
	uint64_t	id_aa64isar0;
	uint64_t	id_aa64isar1;
	uint64_t	id_aa64mmfr0;
	uint64_t	id_aa64mmfr1;
	uint64_t	id_aa64mmfr2;
	uint64_t	id_aa64pfr0;
	uint64_t	id_aa64pfr1;
	uint64_t	ctr;
};
136 
static struct cpu_desc cpu_desc[MAXCPU];	/* Per-CPU register copies. */
static struct cpu_desc kern_cpu_desc;		/* Values the kernel relies on. */
static struct cpu_desc user_cpu_desc;		/* Values exposed to userspace. */
/* Bitmask of PRINT_* flags selecting which ID registers to print. */
static u_int cpu_print_regs;
#define	PRINT_ID_AA64_AFR0	0x00000001
#define	PRINT_ID_AA64_AFR1	0x00000002
#define	PRINT_ID_AA64_DFR0	0x00000010
#define	PRINT_ID_AA64_DFR1	0x00000020
#define	PRINT_ID_AA64_ISAR0	0x00000100
#define	PRINT_ID_AA64_ISAR1	0x00000200
#define	PRINT_ID_AA64_MMFR0	0x00001000
#define	PRINT_ID_AA64_MMFR1	0x00002000
#define	PRINT_ID_AA64_MMFR2	0x00004000
#define	PRINT_ID_AA64_PFR0	0x00010000
#define	PRINT_ID_AA64_PFR1	0x00020000
#define	PRINT_CTR_EL0		0x10000000
153 
/* (PartNum, name) pair used in the per-implementer part tables below. */
struct cpu_parts {
	u_int		part_id;
	const char	*part_name;
};
/* Table terminator / fallback for unrecognized part numbers. */
#define	CPU_PART_NONE	{ 0, "Unknown Processor" }

struct cpu_implementers {
	u_int			impl_id;
	const char		*impl_name;
	/*
	 * Part number is implementation defined
	 * so each vendor will have its own set of values and names.
	 */
	const struct cpu_parts	*cpu_parts;
};
/* Table terminator / fallback for unrecognized implementer codes. */
#define	CPU_IMPLEMENTER_NONE	{ 0, "Unknown Implementer", cpu_parts_none }
170 
/*
 * Per-implementer table of (PartNum, CPU Name) pairs.
 * Each table ends with CPU_PART_NONE, which matches nothing and
 * supplies the "Unknown Processor" fallback name.
 */
/* ARM Ltd. */
static const struct cpu_parts cpu_parts_arm[] = {
	{ CPU_PART_FOUNDATION, "Foundation-Model" },
	{ CPU_PART_CORTEX_A35, "Cortex-A35" },
	{ CPU_PART_CORTEX_A53, "Cortex-A53" },
	{ CPU_PART_CORTEX_A55, "Cortex-A55" },
	{ CPU_PART_CORTEX_A57, "Cortex-A57" },
	{ CPU_PART_CORTEX_A65, "Cortex-A65" },
	{ CPU_PART_CORTEX_A72, "Cortex-A72" },
	{ CPU_PART_CORTEX_A73, "Cortex-A73" },
	{ CPU_PART_CORTEX_A75, "Cortex-A75" },
	{ CPU_PART_CORTEX_A76, "Cortex-A76" },
	{ CPU_PART_CORTEX_A76AE, "Cortex-A76AE" },
	{ CPU_PART_CORTEX_A77, "Cortex-A77" },
	{ CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
	CPU_PART_NONE,
};
/* Cavium */
static const struct cpu_parts cpu_parts_cavium[] = {
	{ CPU_PART_THUNDERX, "ThunderX" },
	{ CPU_PART_THUNDERX2, "ThunderX2" },
	CPU_PART_NONE,
};

/* APM / Ampere */
static const struct cpu_parts cpu_parts_apm[] = {
	{ CPU_PART_EMAG8180, "eMAG 8180" },
	CPU_PART_NONE,
};

/* Unknown */
static const struct cpu_parts cpu_parts_none[] = {
	CPU_PART_NONE,
};
208 
/*
 * Implementers table.
 * Maps the MIDR_EL1 implementer code to a name and that vendor's part
 * table; terminated by CPU_IMPLEMENTER_NONE as the fallback entry.
 */
const struct cpu_implementers cpu_implementers[] = {
	{ CPU_IMPL_ARM,		"ARM",		cpu_parts_arm },
	{ CPU_IMPL_BROADCOM,	"Broadcom",	cpu_parts_none },
	{ CPU_IMPL_CAVIUM,	"Cavium",	cpu_parts_cavium },
	{ CPU_IMPL_DEC,		"DEC",		cpu_parts_none },
	{ CPU_IMPL_INFINEON,	"IFX",		cpu_parts_none },
	{ CPU_IMPL_FREESCALE,	"Freescale",	cpu_parts_none },
	{ CPU_IMPL_NVIDIA,	"NVIDIA",	cpu_parts_none },
	{ CPU_IMPL_APM,		"APM",		cpu_parts_apm },
	{ CPU_IMPL_QUALCOMM,	"Qualcomm",	cpu_parts_none },
	{ CPU_IMPL_MARVELL,	"Marvell",	cpu_parts_none },
	{ CPU_IMPL_INTEL,	"Intel",	cpu_parts_none },
	CPU_IMPLEMENTER_NONE,
};
226 
/*
 * Field handling types for emulated MRS reads.  The type code lives in
 * the low 4 bits; MRS_EXACT_VAL() stores an override field value in the
 * upper bits, recovered with MRS_EXACT_FIELD().
 */
#define	MRS_TYPE_MASK		0xf
#define	MRS_INVALID		0	/* Terminator, see MRS_FIELD_END. */
#define	MRS_EXACT		1	/* Expose a fixed value to userspace. */
#define	MRS_EXACT_VAL(x)	(MRS_EXACT | ((x) << 4))
#define	MRS_EXACT_FIELD(x)	((x) >> 4)
#define	MRS_LOWER		2	/* Expose the lowest value seen, cf.
					   update_lower_register(). */

/* A (register value, human-readable description) pair for one field. */
struct mrs_field_value {
	uint64_t	value;
	const char	*desc;
};

#define	MRS_FIELD_VALUE(_value, _desc)					\
	{								\
		.value = (_value),					\
		.desc = (_desc),					\
	}

/* Two-entry helper for fields that are simply absent/present. */
#define	MRS_FIELD_VALUE_NONE_IMPL(_reg, _field, _none, _impl)		\
	MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _none, ""),		\
	MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _impl, #_field)

/*
 * Helper for count-style fields: raw value N means "N + 1 <things>".
 * Covers the full 4-bit encoding space (1 through 16).
 */
#define	MRS_FIELD_VALUE_COUNT(_reg, _field, _desc)			\
	MRS_FIELD_VALUE(0ul << _reg ## _ ## _field ## _SHIFT, "1 " _desc), \
	MRS_FIELD_VALUE(1ul << _reg ## _ ## _field ## _SHIFT, "2 " _desc "s"), \
	MRS_FIELD_VALUE(2ul << _reg ## _ ## _field ## _SHIFT, "3 " _desc "s"), \
	MRS_FIELD_VALUE(3ul << _reg ## _ ## _field ## _SHIFT, "4 " _desc "s"), \
	MRS_FIELD_VALUE(4ul << _reg ## _ ## _field ## _SHIFT, "5 " _desc "s"), \
	MRS_FIELD_VALUE(5ul << _reg ## _ ## _field ## _SHIFT, "6 " _desc "s"), \
	MRS_FIELD_VALUE(6ul << _reg ## _ ## _field ## _SHIFT, "7 " _desc "s"), \
	MRS_FIELD_VALUE(7ul << _reg ## _ ## _field ## _SHIFT, "8 " _desc "s"), \
	MRS_FIELD_VALUE(8ul << _reg ## _ ## _field ## _SHIFT, "9 " _desc "s"), \
	MRS_FIELD_VALUE(9ul << _reg ## _ ## _field ## _SHIFT, "10 "_desc "s"), \
	MRS_FIELD_VALUE(10ul<< _reg ## _ ## _field ## _SHIFT, "11 "_desc "s"), \
	MRS_FIELD_VALUE(11ul<< _reg ## _ ## _field ## _SHIFT, "12 "_desc "s"), \
	MRS_FIELD_VALUE(12ul<< _reg ## _ ## _field ## _SHIFT, "13 "_desc "s"), \
	MRS_FIELD_VALUE(13ul<< _reg ## _ ## _field ## _SHIFT, "14 "_desc "s"), \
	MRS_FIELD_VALUE(14ul<< _reg ## _ ## _field ## _SHIFT, "15 "_desc "s"), \
	MRS_FIELD_VALUE(15ul<< _reg ## _ ## _field ## _SHIFT, "16 "_desc "s")

#define	MRS_FIELD_VALUE_END	{ .desc = NULL }

/* Describes one bitfield within an ID register. */
struct mrs_field {
	const char	*name;
	struct mrs_field_value *values;	/* MRS_FIELD_VALUE_END-terminated. */
	uint64_t	mask;
	bool		sign;	/* Field compares as signed, see
				   update_lower_register(). */
	u_int		type;	/* MRS_EXACT/MRS_LOWER, see above. */
	u_int		shift;
};

#define	MRS_FIELD(_register, _name, _sign, _type, _values)		\
	{								\
		.name = #_name,						\
		.sign = (_sign),					\
		.type = (_type),					\
		.shift = _register ## _ ## _name ## _SHIFT,		\
		.mask = _register ## _ ## _name ## _MASK,		\
		.values = (_values),					\
	}

#define	MRS_FIELD_END	{ .type = MRS_INVALID, }
/* ID_AA64AFR0_EL1 (no fields are decoded for this register). */
static struct mrs_field id_aa64afr0_fields[] = {
	MRS_FIELD_END,
};


/* ID_AA64AFR1_EL1 (no fields are decoded for this register). */
static struct mrs_field id_aa64afr1_fields[] = {
	MRS_FIELD_END,
};


/* ID_AA64DFR0_EL1 */
static struct mrs_field_value id_aa64dfr0_pmsver[] = {
	MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_V1, "SPE"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64dfr0_ctx_cmps[] = {
	MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, CTX_CMPs, "CTX BKPT"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64dfr0_wrps[] = {
	MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, WRPs, "Watchpoint"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64dfr0_brps[] = {
	MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, BRPs, "Breakpoint"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64dfr0_pmuver[] = {
	MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3, "PMUv3"),
	MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_1, "PMUv3+16 bit evtCount"),
	MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_IMPL, "IMPL PMU"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64dfr0_tracever[] = {
	MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_IMPL, "Trace"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64dfr0_debugver[] = {
	MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8, "Debugv8"),
	MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_VHE, "Debugv8_VHE"),
	MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_2, "Debugv8.2"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field id_aa64dfr0_fields[] = {
	MRS_FIELD(ID_AA64DFR0, PMSVer, false, MRS_EXACT, id_aa64dfr0_pmsver),
	MRS_FIELD(ID_AA64DFR0, CTX_CMPs, false, MRS_EXACT,
	    id_aa64dfr0_ctx_cmps),
	MRS_FIELD(ID_AA64DFR0, WRPs, false, MRS_EXACT, id_aa64dfr0_wrps),
	MRS_FIELD(ID_AA64DFR0, BRPs, false, MRS_LOWER, id_aa64dfr0_brps),
	MRS_FIELD(ID_AA64DFR0, PMUVer, false, MRS_EXACT, id_aa64dfr0_pmuver),
	MRS_FIELD(ID_AA64DFR0, TraceVer, false, MRS_EXACT,
	    id_aa64dfr0_tracever),
	/* DebugVer is forced to 0x6 (Debugv8) for userspace. */
	MRS_FIELD(ID_AA64DFR0, DebugVer, false, MRS_EXACT_VAL(0x6),
	    id_aa64dfr0_debugver),
	MRS_FIELD_END,
};


/* ID_AA64DFR1 (no fields are decoded for this register). */
static struct mrs_field id_aa64dfr1_fields[] = {
	MRS_FIELD_END,
};
364 
365 
/*
 * ID_AA64ISAR0_EL1 - instruction set attributes (crypto, CRC32,
 * atomics, dot product, ...).  One value table per field, then the
 * field list itself.
 */
static struct mrs_field_value id_aa64isar0_dp[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, DP, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar0_sm4[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM4, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar0_sm3[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM3, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar0_sha3[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA3, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar0_rdm[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, RDM, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar0_atomic[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, Atomic, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar0_crc32[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, CRC32, NONE, BASE),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar0_sha2[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA2, NONE, BASE),
	MRS_FIELD_VALUE(ID_AA64ISAR0_SHA2_512, "SHA2+SHA512"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar0_sha1[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA1, NONE, BASE),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar0_aes[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, AES, NONE, BASE),
	MRS_FIELD_VALUE(ID_AA64ISAR0_AES_PMULL, "AES+PMULL"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field id_aa64isar0_fields[] = {
	MRS_FIELD(ID_AA64ISAR0, DP, false, MRS_LOWER, id_aa64isar0_dp),
	MRS_FIELD(ID_AA64ISAR0, SM4, false, MRS_LOWER, id_aa64isar0_sm4),
	MRS_FIELD(ID_AA64ISAR0, SM3, false, MRS_LOWER, id_aa64isar0_sm3),
	MRS_FIELD(ID_AA64ISAR0, SHA3, false, MRS_LOWER, id_aa64isar0_sha3),
	MRS_FIELD(ID_AA64ISAR0, RDM, false, MRS_LOWER, id_aa64isar0_rdm),
	MRS_FIELD(ID_AA64ISAR0, Atomic, false, MRS_LOWER, id_aa64isar0_atomic),
	MRS_FIELD(ID_AA64ISAR0, CRC32, false, MRS_LOWER, id_aa64isar0_crc32),
	MRS_FIELD(ID_AA64ISAR0, SHA2, false, MRS_LOWER, id_aa64isar0_sha2),
	MRS_FIELD(ID_AA64ISAR0, SHA1, false, MRS_LOWER, id_aa64isar0_sha1),
	MRS_FIELD(ID_AA64ISAR0, AES, false, MRS_LOWER, id_aa64isar0_aes),
	MRS_FIELD_END,
};
432 
433 
/*
 * ID_AA64ISAR1_EL1 - instruction set attributes (pointer authentication,
 * LRCPC, FCMA, JSCVT, DC CVAP, ...).
 */
static struct mrs_field_value id_aa64isar1_gpi[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPI, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar1_gpa[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar1_lrcpc[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, LRCPC, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar1_fcma[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FCMA, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar1_jscvt[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, JSCVT, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64isar1_api[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, API, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};
464 
465 static struct mrs_field_value id_aa64isar1_apa[] = {
466 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL),
467 	MRS_FIELD_VALUE_END,
468 };
469 
static struct mrs_field_value id_aa64isar1_dpb[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, DPB, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

/* Pointer-auth fields (GPI/GPA/API/APA) are MRS_EXACT: hidden from users. */
static struct mrs_field id_aa64isar1_fields[] = {
	MRS_FIELD(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi),
	MRS_FIELD(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa),
	MRS_FIELD(ID_AA64ISAR1, LRCPC, false, MRS_LOWER, id_aa64isar1_lrcpc),
	MRS_FIELD(ID_AA64ISAR1, FCMA, false, MRS_LOWER, id_aa64isar1_fcma),
	MRS_FIELD(ID_AA64ISAR1, JSCVT, false, MRS_LOWER, id_aa64isar1_jscvt),
	MRS_FIELD(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api),
	MRS_FIELD(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa),
	MRS_FIELD(ID_AA64ISAR1, DPB, false, MRS_LOWER, id_aa64isar1_dpb),
	MRS_FIELD_END,
};
486 
487 
/*
 * ID_AA64MMFR0_EL1 - memory model features (granule sizes, endianness,
 * ASID width, physical address range).
 */
static struct mrs_field_value id_aa64mmfr0_tgran4[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran4, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr0_tgran64[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran64, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr0_tgran16[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran16, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr0_bigend_el0[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEndEL0, FIXED, MIXED),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr0_snsmem[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, SNSMem, NONE, DISTINCT),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr0_bigend[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEnd, FIXED, MIXED),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr0_asid_bits[] = {
	MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_8, "8bit ASID"),
	MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_16, "16bit ASID"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr0_parange[] = {
	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4G, "4GB PA"),
	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_64G, "64GB PA"),
	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_1T, "1TB PA"),
	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4T, "4TB PA"),
	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_16T, "16TB PA"),
	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_256T, "256TB PA"),
	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4P, "4PB PA"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field id_aa64mmfr0_fields[] = {
	MRS_FIELD(ID_AA64MMFR0, TGran4, false, MRS_EXACT, id_aa64mmfr0_tgran4),
	MRS_FIELD(ID_AA64MMFR0, TGran64, false, MRS_EXACT,
	    id_aa64mmfr0_tgran64),
	MRS_FIELD(ID_AA64MMFR0, TGran16, false, MRS_EXACT,
	    id_aa64mmfr0_tgran16),
	MRS_FIELD(ID_AA64MMFR0, BigEndEL0, false, MRS_EXACT,
	    id_aa64mmfr0_bigend_el0),
	MRS_FIELD(ID_AA64MMFR0, SNSMem, false, MRS_EXACT, id_aa64mmfr0_snsmem),
	MRS_FIELD(ID_AA64MMFR0, BigEnd, false, MRS_EXACT, id_aa64mmfr0_bigend),
	MRS_FIELD(ID_AA64MMFR0, ASIDBits, false, MRS_EXACT,
	    id_aa64mmfr0_asid_bits),
	MRS_FIELD(ID_AA64MMFR0, PARange, false, MRS_EXACT,
	    id_aa64mmfr0_parange),
	MRS_FIELD_END,
};
552 
553 
/*
 * ID_AA64MMFR1_EL1 - memory model features (PAN, VHE, VMID width,
 * hardware access flag / dirty state, ...).
 */
static struct mrs_field_value id_aa64mmfr1_xnx[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, XNX, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_specsei[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, SpecSEI, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_pan[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, PAN, NONE, IMPL),
	MRS_FIELD_VALUE(ID_AA64MMFR1_PAN_ATS1E1, "PAN+ATS1E1"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_lo[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, LO, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_hpds[] = {
	MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_HPD, "HPD"),
	MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_TTPBHA, "HPD+TTPBHA"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_vh[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, VH, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_vmidbits[] = {
	MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_8, "8bit VMID"),
	MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_16, "16bit VMID"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_hafdbs[] = {
	MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF, "HAF"),
	MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF_DBS, "HAF+DS"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field id_aa64mmfr1_fields[] = {
	MRS_FIELD(ID_AA64MMFR1, XNX, false, MRS_EXACT, id_aa64mmfr1_xnx),
	MRS_FIELD(ID_AA64MMFR1, SpecSEI, false, MRS_EXACT,
	    id_aa64mmfr1_specsei),
	MRS_FIELD(ID_AA64MMFR1, PAN, false, MRS_EXACT, id_aa64mmfr1_pan),
	MRS_FIELD(ID_AA64MMFR1, LO, false, MRS_EXACT, id_aa64mmfr1_lo),
	MRS_FIELD(ID_AA64MMFR1, HPDS, false, MRS_EXACT, id_aa64mmfr1_hpds),
	MRS_FIELD(ID_AA64MMFR1, VH, false, MRS_EXACT, id_aa64mmfr1_vh),
	MRS_FIELD(ID_AA64MMFR1, VMIDBits, false, MRS_EXACT,
	    id_aa64mmfr1_vmidbits),
	MRS_FIELD(ID_AA64MMFR1, HAFDBS, false, MRS_EXACT, id_aa64mmfr1_hafdbs),
	MRS_FIELD_END,
};
614 
615 
/*
 * ID_AA64MMFR2_EL1 - memory model features (nested virt, CCIDX, VA
 * range, UAO, common-not-private, ...).
 */
static struct mrs_field_value id_aa64mmfr2_nv[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, NV, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr2_ccidx[] = {
	MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_32, "32bit CCIDX"),
	MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_64, "64bit CCIDX"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr2_varange[] = {
	MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_48, "48bit VA"),
	MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_52, "52bit VA"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr2_iesb[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, IESB, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr2_lsm[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, LSM, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr2_uao[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, UAO, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr2_cnp[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, CnP, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field id_aa64mmfr2_fields[] = {
	MRS_FIELD(ID_AA64MMFR2, NV, false, MRS_EXACT, id_aa64mmfr2_nv),
	MRS_FIELD(ID_AA64MMFR2, CCIDX, false, MRS_EXACT, id_aa64mmfr2_ccidx),
	MRS_FIELD(ID_AA64MMFR2, VARange, false, MRS_EXACT,
	    id_aa64mmfr2_varange),
	MRS_FIELD(ID_AA64MMFR2, IESB, false, MRS_EXACT, id_aa64mmfr2_iesb),
	MRS_FIELD(ID_AA64MMFR2, LSM, false, MRS_EXACT, id_aa64mmfr2_lsm),
	MRS_FIELD(ID_AA64MMFR2, UAO, false, MRS_EXACT, id_aa64mmfr2_uao),
	MRS_FIELD(ID_AA64MMFR2, CnP, false, MRS_EXACT, id_aa64mmfr2_cnp),
	MRS_FIELD_END,
};
665 
666 
/*
 * ID_AA64PFR0_EL1 - processor features (FP/AdvSIMD, SVE, RAS, GIC
 * interface, supported exception levels, Spectre CSV bits).
 */
static struct mrs_field_value id_aa64pfr0_csv3[] = {
	MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_ISOLATED, "CSV3"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_csv2[] = {
	MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_ISOLATED, "CSV2"),
	MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_SCXTNUM, "SCXTNUM"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_dit[] = {
	MRS_FIELD_VALUE(ID_AA64PFR0_DIT_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64PFR0_DIT_PSTATE, "PSTATE.DIT"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_amu[] = {
	MRS_FIELD_VALUE(ID_AA64PFR0_AMU_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64PFR0_AMU_V1, "AMUv1"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_mpam[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, MPAM, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_sel2[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SEL2, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_sve[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SVE, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_ras[] = {
	MRS_FIELD_VALUE(ID_AA64PFR0_RAS_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64PFR0_RAS_V1, "RASv1"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_gic[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, GIC, CPUIF_NONE, CPUIF_EN),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_advsimd[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, AdvSIMD, NONE, IMPL),
	MRS_FIELD_VALUE(ID_AA64PFR0_AdvSIMD_HP, "AdvSIMD+HP"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_fp[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, FP, NONE, IMPL),
	MRS_FIELD_VALUE(ID_AA64PFR0_FP_HP, "FP+HP"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_el3[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL3, NONE, 64),
	MRS_FIELD_VALUE(ID_AA64PFR0_EL3_64_32, "EL3 32"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_el2[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL2, NONE, 64),
	MRS_FIELD_VALUE(ID_AA64PFR0_EL2_64_32, "EL2 32"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_el1[] = {
	MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64, "EL1"),
	MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64_32, "EL1 32"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr0_el0[] = {
	MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64, "EL0"),
	MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64_32, "EL0 32"),
	MRS_FIELD_VALUE_END,
};

/* AdvSIMD and FP are signed fields: 0xf encodes "not implemented". */
static struct mrs_field id_aa64pfr0_fields[] = {
	MRS_FIELD(ID_AA64PFR0, CSV3, false, MRS_EXACT, id_aa64pfr0_csv3),
	MRS_FIELD(ID_AA64PFR0, CSV2, false, MRS_EXACT, id_aa64pfr0_csv2),
	MRS_FIELD(ID_AA64PFR0, DIT, false, MRS_EXACT, id_aa64pfr0_dit),
	MRS_FIELD(ID_AA64PFR0, AMU, false, MRS_EXACT, id_aa64pfr0_amu),
	MRS_FIELD(ID_AA64PFR0, MPAM, false, MRS_EXACT, id_aa64pfr0_mpam),
	MRS_FIELD(ID_AA64PFR0, SEL2, false, MRS_EXACT, id_aa64pfr0_sel2),
	MRS_FIELD(ID_AA64PFR0, SVE, false, MRS_EXACT, id_aa64pfr0_sve),
	MRS_FIELD(ID_AA64PFR0, RAS, false, MRS_EXACT, id_aa64pfr0_ras),
	MRS_FIELD(ID_AA64PFR0, GIC, false, MRS_EXACT, id_aa64pfr0_gic),
	MRS_FIELD(ID_AA64PFR0, AdvSIMD, true, MRS_LOWER, id_aa64pfr0_advsimd),
	MRS_FIELD(ID_AA64PFR0, FP, true,  MRS_LOWER, id_aa64pfr0_fp),
	MRS_FIELD(ID_AA64PFR0, EL3, false, MRS_EXACT, id_aa64pfr0_el3),
	MRS_FIELD(ID_AA64PFR0, EL2, false, MRS_EXACT, id_aa64pfr0_el2),
	MRS_FIELD(ID_AA64PFR0, EL1, false, MRS_LOWER, id_aa64pfr0_el1),
	MRS_FIELD(ID_AA64PFR0, EL0, false, MRS_LOWER, id_aa64pfr0_el0),
	MRS_FIELD_END,
};
773 
774 
/* ID_AA64PFR1_EL1 - processor features (BTI, SSBS, MTE). */
static struct mrs_field_value id_aa64pfr1_bt[] = {
	MRS_FIELD_VALUE(ID_AA64PFR1_BT_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64PFR1_BT_IMPL, "BTI"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr1_ssbs[] = {
	MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE, "PSTATE.SSBS"),
	MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE_MSR, "PSTATE.SSBS MSR"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64pfr1_mte[] = {
	MRS_FIELD_VALUE(ID_AA64PFR1_MTE_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64PFR1_MTE_IMPL_EL0, "MTE EL0"),
	MRS_FIELD_VALUE(ID_AA64PFR1_MTE_IMPL, "MTE"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field id_aa64pfr1_fields[] = {
	MRS_FIELD(ID_AA64PFR1, BT, false, MRS_EXACT, id_aa64pfr1_bt),
	MRS_FIELD(ID_AA64PFR1, SSBS, false, MRS_EXACT, id_aa64pfr1_ssbs),
	MRS_FIELD(ID_AA64PFR1, MTE, false, MRS_EXACT, id_aa64pfr1_mte),
	MRS_FIELD_END,
};
802 
/*
 * One ID register whose MRS reads are emulated for userspace:
 * the encoding to match (CRm/Op2, with Op0=3, Op1=0, CRn=0 implied),
 * where its cached value lives inside struct cpu_desc, and the field
 * descriptions used to build the user-visible value.
 */
struct mrs_user_reg {
	u_int		reg;	/* Register identifier, e.g. ID_AA64ISAR0_EL1. */
	u_int		CRm;
	u_int		Op2;
	size_t		offset;	/* Offset of the value in struct cpu_desc. */
	struct mrs_field *fields;
};
810 
/* ID registers emulated for userspace by user_mrs_handler(). */
static struct mrs_user_reg user_regs[] = {
	{	/* id_aa64isar0_el1 */
		.reg = ID_AA64ISAR0_EL1,
		.CRm = 6,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64isar0),
		.fields = id_aa64isar0_fields,
	},
	{	/* id_aa64isar1_el1 */
		.reg = ID_AA64ISAR1_EL1,
		.CRm = 6,
		.Op2 = 1,
		.offset = __offsetof(struct cpu_desc, id_aa64isar1),
		.fields = id_aa64isar1_fields,
	},
	{	/* id_aa64pfr0_el1 */
		.reg = ID_AA64PFR0_EL1,
		.CRm = 4,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64pfr0),
		.fields = id_aa64pfr0_fields,
	},
	{	/* id_aa64pfr1_el1 */
		.reg = ID_AA64PFR1_EL1,
		.CRm = 4,
		.Op2 = 1,
		.offset = __offsetof(struct cpu_desc, id_aa64pfr1),
		.fields = id_aa64pfr1_fields,
	},
	{	/* id_aa64dfr0_el1 */
		.reg = ID_AA64DFR0_EL1,
		.CRm = 5,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64dfr0),
		.fields = id_aa64dfr0_fields,
	},
};
848 
/* Access the uint64_t in (desc) described by user_regs[idx].offset. */
#define	CPU_DESC_FIELD(desc, idx)					\
    *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset)
851 
/*
 * Undefined-instruction handler that emulates userspace MRS reads of
 * the EL1 CPU identification registers.  Returns 1 if the instruction
 * was handled (and the PC advanced), 0 to let other handlers run.
 */
static int
user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
    uint32_t esr)
{
	uint64_t value;
	int CRm, Op2, i, reg;

	/* Not an MRS instruction at all - decline. */
	if ((insn & MRS_MASK) != MRS_VALUE)
		return (0);

	/*
	 * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
	 * These are in the EL1 CPU identification space.
	 * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVID_EL1.
	 * CRm == {4-7} holds the ID_AA64 registers.
	 *
	 * For full details see the ARMv8 ARM (ARM DDI 0487C.a)
	 * Table D9-2 System instruction encodings for non-Debug System
	 * register accesses.
	 */
	if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
		return (0);

	CRm = mrs_CRm(insn);
	if (CRm > 7 || (CRm < 4 && CRm != 0))
		return (0);

	Op2 = mrs_Op2(insn);
	value = 0;

	/*
	 * Look the encoding up in user_regs[]; unlisted registers in the
	 * CRm 4-7 range are emulated as reading zero.
	 */
	for (i = 0; i < nitems(user_regs); i++) {
		if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
			value = CPU_DESC_FIELD(user_cpu_desc, i);
			break;
		}
	}

	/* CRm == 0: pass through the real MIDR/MPIDR/REVIDR values. */
	if (CRm == 0) {
		switch (Op2) {
		case 0:
			value = READ_SPECIALREG(midr_el1);
			break;
		case 5:
			value = READ_SPECIALREG(mpidr_el1);
			break;
		case 6:
			value = READ_SPECIALREG(revidr_el1);
			break;
		default:
			return (0);
		}
	}

	/*
	 * We will handle this instruction, move to the next so we
	 * don't trap here again.
	 */
	frame->tf_elr += INSN_SIZE;

	reg = MRS_REGISTER(insn);
	/* If reg is 31 then write to xzr, i.e. do nothing */
	if (reg == 31)
		return (1);

	/* x0-x29 live in tf_x; x30 is the link register. */
	if (reg < nitems(frame->tf_x))
		frame->tf_x[reg] = value;
	else if (reg == 30)
		frame->tf_lr = value;

	return (1);
}
923 
924 bool
925 extract_user_id_field(u_int reg, u_int field_shift, uint8_t *val)
926 {
927 	uint64_t value;
928 	int i;
929 
930 	for (i = 0; i < nitems(user_regs); i++) {
931 		if (user_regs[i].reg == reg) {
932 			value = CPU_DESC_FIELD(user_cpu_desc, i);
933 			*val = value >> field_shift;
934 			return (true);
935 		}
936 	}
937 
938 	return (false);
939 }
940 
941 bool
942 get_kernel_reg(u_int reg, uint64_t *val)
943 {
944 	int i;
945 
946 	for (i = 0; i < nitems(user_regs); i++) {
947 		if (user_regs[i].reg == reg) {
948 			*val = CPU_DESC_FIELD(kern_cpu_desc, i);
949 			return (true);
950 		}
951 	}
952 
953 	return (false);
954 }
955 
956 static uint64_t
957 update_lower_register(uint64_t val, uint64_t new_val, u_int shift,
958     int width, bool sign)
959 {
960 	uint64_t mask;
961 	uint64_t new_field, old_field;
962 	bool update;
963 
964 	KASSERT(width > 0 && width < 64, ("%s: Invalid width %d", __func__,
965 	    width));
966 
967 	mask = (1ul << width) - 1;
968 	new_field = (new_val >> shift) & mask;
969 	old_field = (val >> shift) & mask;
970 
971 	update = false;
972 	if (sign) {
973 		/*
974 		 * The field is signed. Toggle the upper bit so the comparison
975 		 * works on unsigned values as this makes positive numbers,
976 		 * i.e. those with a 0 bit, larger than negative numbers,
977 		 * i.e. those with a 1 bit, in an unsigned comparison.
978 		 */
979 		if ((new_field ^ (1ul << (width - 1))) <
980 		    (old_field ^ (1ul << (width - 1))))
981 			update = true;
982 	} else {
983 		if (new_field < old_field)
984 			update = true;
985 	}
986 
987 	if (update) {
988 		val &= ~(mask << shift);
989 		val |= new_field << shift;
990 	}
991 
992 	return (val);
993 }
994 
/*
 * Fold CPU "cpu"'s ID register values into the system-wide kernel
 * (kern_cpu_desc) and user (user_cpu_desc) register views so each view
 * only advertises features common to every CPU seen so far.
 *
 * CPU 0 seeds both views with its own values (after resetting the user
 * view to safe defaults); later CPUs can only lower fields.
 */
void
update_special_regs(u_int cpu)
{
	struct mrs_field *fields;
	uint64_t user_reg, kern_reg, value;
	int i, j;

	if (cpu == 0) {
		/* Create a user visible cpu description with safe values */
		memset(&user_cpu_desc, 0, sizeof(user_cpu_desc));
		/* Safe values for these registers */
		user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_AdvSIMD_NONE |
		    ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 |
		    ID_AA64PFR0_EL0_64;
		user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DebugVer_8;
	}

	for (i = 0; i < nitems(user_regs); i++) {
		value = CPU_DESC_FIELD(cpu_desc[cpu], i);
		if (cpu == 0) {
			kern_reg = value;
			user_reg = value;
		} else {
			kern_reg = CPU_DESC_FIELD(kern_cpu_desc, i);
			user_reg = CPU_DESC_FIELD(user_cpu_desc, i);
		}

		fields = user_regs[i].fields;
		for (j = 0; fields[j].type != 0; j++) {
			switch (fields[j].type & MRS_TYPE_MASK) {
			case MRS_EXACT:
				/*
				 * Force the user-visible field to the fixed
				 * value encoded in the field type.  Fields
				 * are assumed to be 4 bits wide.
				 */
				user_reg &= ~(0xfu << fields[j].shift);
				user_reg |=
				    (uint64_t)MRS_EXACT_FIELD(fields[j].type) <<
				    fields[j].shift;
				break;
			case MRS_LOWER:
				/* Track the lowest value seen so far. */
				user_reg = update_lower_register(user_reg,
				    value, fields[j].shift, 4, fields[j].sign);
				break;
			default:
				panic("Invalid field type: %d", fields[j].type);
			}
			/* The kernel view always tracks the minimum. */
			kern_reg = update_lower_register(kern_reg, value,
			    fields[j].shift, 4, fields[j].sign);
		}

		CPU_DESC_FIELD(kern_cpu_desc, i) = kern_reg;
		CPU_DESC_FIELD(user_cpu_desc, i) = user_reg;
	}
}
1046 
/* HWCAP */
extern u_long elf_hwcap;
/* Set once every CPU advertises HWCAP_ATOMICS (the LSE instructions). */
bool __read_frequently lse_supported = false;

/* L1 I-cache policy flags decoded from CTR_EL0 by identify_cache(). */
bool __read_frequently icache_aliasing = false;
bool __read_frequently icache_vmid = false;

int64_t dcache_line_size;	/* The minimum D cache line size */
int64_t icache_line_size;	/* The minimum I cache line size */
int64_t idcache_line_size;	/* The minimum cache line size */
1057 
/*
 * Finalize CPU identification once all CPUs have been enumerated:
 * intersect the ELF HWCAP flags across CPUs, merge the special
 * registers for secondary CPUs, select the cheap I-cache sync routine
 * when every CPU reports DIC and IDC in CTR_EL0, and install the EL0
 * MRS emulation handler.
 */
static void
identify_cpu_sysinit(void *dummy __unused)
{
	int cpu;
	u_long hwcap;
	bool dic, idc;

	dic = (allow_dic != 0);
	idc = (allow_idc != 0);

	CPU_FOREACH(cpu) {
		check_cpu_regs(cpu);
		hwcap = parse_cpu_features_hwcap(cpu);
		/* Only advertise features present on every CPU. */
		if (elf_hwcap == 0)
			elf_hwcap = hwcap;
		else
			elf_hwcap &= hwcap;
		if (cpu != 0)
			update_special_regs(cpu);

		/* DIC/IDC are only usable if every CPU has them. */
		if (CTR_DIC_VAL(cpu_desc[cpu].ctr) == 0)
			dic = false;
		if (CTR_IDC_VAL(cpu_desc[cpu].ctr) == 0)
			idc = false;
	}

	if (dic && idc) {
		arm64_icache_sync_range = &arm64_dic_idc_icache_sync_range;
		if (bootverbose)
			printf("Enabling DIC & IDC ICache sync\n");
	}

	if ((elf_hwcap & HWCAP_ATOMICS) != 0) {
		lse_supported = true;
		if (bootverbose)
			printf("Enabling LSE atomics in the kernel\n");
	}
#ifdef LSE_ATOMICS
	if (!lse_supported)
		panic("CPU does not support LSE atomic instructions");
#endif

	install_undef_handler(true, user_mrs_handler);
}
SYSINIT(identify_cpu, SI_SUB_CPU, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
1103 
1104 static void
1105 cpu_features_sysinit(void *dummy __unused)
1106 {
1107 	u_int cpu;
1108 
1109 	CPU_FOREACH(cpu)
1110 		print_cpu_features(cpu);
1111 }
1112 SYSINIT(cpu_features, SI_SUB_SMP, SI_ORDER_ANY, cpu_features_sysinit, NULL);
1113 
1114 static u_long
1115 parse_cpu_features_hwcap(u_int cpu)
1116 {
1117 	u_long hwcap = 0;
1118 
1119 	if (ID_AA64ISAR0_DP_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_DP_IMPL)
1120 		hwcap |= HWCAP_ASIMDDP;
1121 
1122 	if (ID_AA64ISAR0_SM4_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM4_IMPL)
1123 		hwcap |= HWCAP_SM4;
1124 
1125 	if (ID_AA64ISAR0_SM3_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM3_IMPL)
1126 		hwcap |= HWCAP_SM3;
1127 
1128 	if (ID_AA64ISAR0_RDM_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_RDM_IMPL)
1129 		hwcap |= HWCAP_ASIMDRDM;
1130 
1131 	if (ID_AA64ISAR0_Atomic_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_Atomic_IMPL)
1132 		hwcap |= HWCAP_ATOMICS;
1133 
1134 	if (ID_AA64ISAR0_CRC32_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_CRC32_BASE)
1135 		hwcap |= HWCAP_CRC32;
1136 
1137 	switch (ID_AA64ISAR0_SHA2_VAL(cpu_desc[cpu].id_aa64isar0)) {
1138 		case ID_AA64ISAR0_SHA2_BASE:
1139 			hwcap |= HWCAP_SHA2;
1140 			break;
1141 		case ID_AA64ISAR0_SHA2_512:
1142 			hwcap |= HWCAP_SHA2 | HWCAP_SHA512;
1143 			break;
1144 	default:
1145 		break;
1146 	}
1147 
1148 	if (ID_AA64ISAR0_SHA1_VAL(cpu_desc[cpu].id_aa64isar0))
1149 		hwcap |= HWCAP_SHA1;
1150 
1151 	switch (ID_AA64ISAR0_AES_VAL(cpu_desc[cpu].id_aa64isar0)) {
1152 	case ID_AA64ISAR0_AES_BASE:
1153 		hwcap |= HWCAP_AES;
1154 		break;
1155 	case ID_AA64ISAR0_AES_PMULL:
1156 		hwcap |= HWCAP_PMULL | HWCAP_AES;
1157 		break;
1158 	default:
1159 		break;
1160 	}
1161 
1162 	if (ID_AA64ISAR1_LRCPC_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_LRCPC_IMPL)
1163 		hwcap |= HWCAP_LRCPC;
1164 
1165 	if (ID_AA64ISAR1_FCMA_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_FCMA_IMPL)
1166 		hwcap |= HWCAP_FCMA;
1167 
1168 	if (ID_AA64ISAR1_JSCVT_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_JSCVT_IMPL)
1169 		hwcap |= HWCAP_JSCVT;
1170 
1171 	if (ID_AA64ISAR1_DPB_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_DPB_IMPL)
1172 		hwcap |= HWCAP_DCPOP;
1173 
1174 	if (ID_AA64PFR0_SVE_VAL(cpu_desc[cpu].id_aa64pfr0) == ID_AA64PFR0_SVE_IMPL)
1175 		hwcap |= HWCAP_SVE;
1176 
1177 	switch (ID_AA64PFR0_AdvSIMD_VAL(cpu_desc[cpu].id_aa64pfr0)) {
1178 	case ID_AA64PFR0_AdvSIMD_IMPL:
1179 		hwcap |= HWCAP_ASIMD;
1180 		break;
1181 	case ID_AA64PFR0_AdvSIMD_HP:
1182 		hwcap |= HWCAP_ASIMD | HWCAP_ASIMDDP;
1183 		break;
1184 	default:
1185 		break;
1186 	}
1187 
1188 	switch (ID_AA64PFR0_FP_VAL(cpu_desc[cpu].id_aa64pfr0)) {
1189 	case ID_AA64PFR0_FP_IMPL:
1190 		hwcap |= HWCAP_FP;
1191 		break;
1192 	case ID_AA64PFR0_FP_HP:
1193 		hwcap |= HWCAP_FP | HWCAP_FPHP;
1194 		break;
1195 	default:
1196 		break;
1197 	}
1198 
1199 	return (hwcap);
1200 }
1201 
/*
 * sbuf pretty-printer for CTR_EL0 (Cache Type Register).  Each decoded
 * field is masked off "reg" as it is printed; any bits left over at the
 * end are printed raw in hex.
 */
static void
print_ctr_fields(struct sbuf *sb, uint64_t reg, void *arg)
{

	sbuf_printf(sb, "%u byte D-cacheline,", CTR_DLINE_SIZE(reg));
	sbuf_printf(sb, "%u byte I-cacheline,", CTR_ILINE_SIZE(reg));
	reg &= ~(CTR_DLINE_MASK | CTR_ILINE_MASK);

	/* L1 instruction cache indexing/tagging policy. */
	switch(CTR_L1IP_VAL(reg)) {
	case CTR_L1IP_VPIPT:
		sbuf_printf(sb, "VPIPT");
		break;
	case CTR_L1IP_AIVIVT:
		sbuf_printf(sb, "AIVIVT");
		break;
	case CTR_L1IP_VIPT:
		sbuf_printf(sb, "VIPT");
		break;
	case CTR_L1IP_PIPT:
		sbuf_printf(sb, "PIPT");
		break;
	}
	sbuf_printf(sb, " ICache,");
	reg &= ~CTR_L1IP_MASK;

	/* Exclusives reservation granule and cache writeback granule. */
	sbuf_printf(sb, "%d byte ERG,", CTR_ERG_SIZE(reg));
	sbuf_printf(sb, "%d byte CWG", CTR_CWG_SIZE(reg));
	reg &= ~(CTR_ERG_MASK | CTR_CWG_MASK);

	if (CTR_IDC_VAL(reg) != 0)
		sbuf_printf(sb, ",IDC");
	if (CTR_DIC_VAL(reg) != 0)
		sbuf_printf(sb, ",DIC");
	reg &= ~(CTR_IDC_MASK | CTR_DIC_MASK);
	reg &= ~CTR_RES1;

	/* Anything not decoded above is shown raw. */
	if (reg != 0)
		sbuf_printf(sb, ",%lx", reg);
}
1241 
/*
 * Print one register as "<reg_name> = <fields>" using the supplied
 * field formatter, then reset the sbuf so the caller can reuse it.
 */
static void
print_register(struct sbuf *sb, const char *reg_name, uint64_t reg,
    void (*print_fields)(struct sbuf *, uint64_t, void *), void *arg)
{

	sbuf_printf(sb, "%29s = <", reg_name);

	print_fields(sb, reg, arg);

	sbuf_finish(sb);
	printf("%s>\n", sbuf_data(sb));
	sbuf_clear(sb);
}
1255 
/*
 * sbuf field formatter for the ID_AA64* registers; "arg" is the
 * mrs_field table describing the register.  Known field values print
 * their description, unknown values print "Unknown <name>(<value>)",
 * and any undecoded bits are printed raw in hex at the end.
 */
static void
print_id_fields(struct sbuf *sb, uint64_t reg, void *arg)
{
	struct mrs_field *fields = arg;
	struct mrs_field_value *fv;
	int field, i, j, printed;

/* Comma separator before every item after the first. */
#define SEP_STR	((printed++) == 0) ? "" : ","
	printed = 0;
	for (i = 0; fields[i].type != 0; i++) {
		fv = fields[i].values;

		/* TODO: Handle with an unknown message */
		if (fv == NULL)
			continue;

		field = (reg & fields[i].mask) >> fields[i].shift;
		for (j = 0; fv[j].desc != NULL; j++) {
			if ((fv[j].value >> fields[i].shift) != field)
				continue;

			/* An empty description suppresses output. */
			if (fv[j].desc[0] != '\0')
				sbuf_printf(sb, "%s%s", SEP_STR, fv[j].desc);
			break;
		}
		if (fv[j].desc == NULL)
			sbuf_printf(sb, "%sUnknown %s(%x)", SEP_STR,
			    fields[i].name, field);

		/*
		 * NOTE(review): this clears a fixed 4 bits whereas the
		 * match above used fields[i].mask; if a mask is ever wider
		 * than 4 bits the extra bits would be reported again as
		 * undecoded below — confirm all masks are 4 bits wide.
		 */
		reg &= ~(0xful << fields[i].shift);
	}

	if (reg != 0)
		sbuf_printf(sb, "%s%#lx", SEP_STR, reg);
#undef SEP_STR
}
1292 
/* Print an ID_AA64* register using its mrs_field description table. */
static void
print_id_register(struct sbuf *sb, const char *reg_name, uint64_t reg,
    struct mrs_field *fields)
{

	print_register(sb, reg_name, reg, print_id_fields, fields);
}
1300 
1301 static void
1302 print_cpu_features(u_int cpu)
1303 {
1304 	struct sbuf *sb;
1305 
1306 	sb = sbuf_new_auto();
1307 	sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
1308 	    cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
1309 	    cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
1310 
1311 	sbuf_cat(sb, " affinity:");
1312 	switch(cpu_aff_levels) {
1313 	default:
1314 	case 4:
1315 		sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
1316 		/* FALLTHROUGH */
1317 	case 3:
1318 		sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
1319 		/* FALLTHROUGH */
1320 	case 2:
1321 		sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
1322 		/* FALLTHROUGH */
1323 	case 1:
1324 	case 0: /* On UP this will be zero */
1325 		sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
1326 		break;
1327 	}
1328 	sbuf_finish(sb);
1329 	printf("%s\n", sbuf_data(sb));
1330 	sbuf_clear(sb);
1331 
1332 	/*
1333 	 * There is a hardware errata where, if one CPU is performing a TLB
1334 	 * invalidation while another is performing a store-exclusive the
1335 	 * store-exclusive may return the wrong status. A workaround seems
1336 	 * to be to use an IPI to invalidate on each CPU, however given the
1337 	 * limited number of affected units (pass 1.1 is the evaluation
1338 	 * hardware revision), and the lack of information from Cavium
1339 	 * this has not been implemented.
1340 	 *
1341 	 * At the time of writing this the only information is from:
1342 	 * https://lkml.org/lkml/2016/8/4/722
1343 	 */
1344 	/*
1345 	 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
1346 	 * triggers on pass 2.0+.
1347 	 */
1348 	if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
1349 	    CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
1350 		printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
1351 		    "hardware bugs that may cause the incorrect operation of "
1352 		    "atomic operations.\n");
1353 
1354 	/* Cache Type Register */
1355 	if (cpu == 0 || (cpu_print_regs & PRINT_CTR_EL0) != 0) {
1356 		print_register(sb, "Cache Type",
1357 		    cpu_desc[cpu].ctr, print_ctr_fields, NULL);
1358 	}
1359 
1360 	/* AArch64 Instruction Set Attribute Register 0 */
1361 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0)
1362 		print_id_register(sb, "Instruction Set Attributes 0",
1363 		    cpu_desc[cpu].id_aa64isar0, id_aa64isar0_fields);
1364 
1365 	/* AArch64 Instruction Set Attribute Register 1 */
1366 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0)
1367 		print_id_register(sb, "Instruction Set Attributes 1",
1368 		    cpu_desc[cpu].id_aa64isar1, id_aa64isar1_fields);
1369 
1370 	/* AArch64 Processor Feature Register 0 */
1371 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0)
1372 		print_id_register(sb, "Processor Features 0",
1373 		    cpu_desc[cpu].id_aa64pfr0, id_aa64pfr0_fields);
1374 
1375 	/* AArch64 Processor Feature Register 1 */
1376 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0)
1377 		print_id_register(sb, "Processor Features 1",
1378 		    cpu_desc[cpu].id_aa64pfr1, id_aa64pfr1_fields);
1379 
1380 	/* AArch64 Memory Model Feature Register 0 */
1381 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0)
1382 		print_id_register(sb, "Memory Model Features 0",
1383 		    cpu_desc[cpu].id_aa64mmfr0, id_aa64mmfr0_fields);
1384 
1385 	/* AArch64 Memory Model Feature Register 1 */
1386 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0)
1387 		print_id_register(sb, "Memory Model Features 1",
1388 		    cpu_desc[cpu].id_aa64mmfr1, id_aa64mmfr1_fields);
1389 
1390 	/* AArch64 Memory Model Feature Register 2 */
1391 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0)
1392 		print_id_register(sb, "Memory Model Features 2",
1393 		    cpu_desc[cpu].id_aa64mmfr2, id_aa64mmfr2_fields);
1394 
1395 	/* AArch64 Debug Feature Register 0 */
1396 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0)
1397 		print_id_register(sb, "Debug Features 0",
1398 		    cpu_desc[cpu].id_aa64dfr0, id_aa64dfr0_fields);
1399 
1400 	/* AArch64 Memory Model Feature Register 1 */
1401 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0)
1402 		print_id_register(sb, "Debug Features 1",
1403 		    cpu_desc[cpu].id_aa64dfr1, id_aa64dfr1_fields);
1404 
1405 	/* AArch64 Auxiliary Feature Register 0 */
1406 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0)
1407 		print_id_register(sb, "Auxiliary Features 0",
1408 		    cpu_desc[cpu].id_aa64afr0, id_aa64afr0_fields);
1409 
1410 	/* AArch64 Auxiliary Feature Register 1 */
1411 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0)
1412 		print_id_register(sb, "Auxiliary Features 1",
1413 		    cpu_desc[cpu].id_aa64afr1, id_aa64afr1_fields);
1414 
1415 	sbuf_delete(sb);
1416 	sb = NULL;
1417 #undef SEP_STR
1418 }
1419 
/*
 * Record the L1 I-cache policy and the minimum cache line sizes from a
 * CTR_EL0 value.  The line sizes are latched by the first caller; later
 * callers (CPUs whose CTR_EL0 differs) only warn on a mismatch.
 */
void
identify_cache(uint64_t ctr)
{

	/* Identify the L1 cache type */
	switch (CTR_L1IP_VAL(ctr)) {
	case CTR_L1IP_PIPT:
		break;
	case CTR_L1IP_VPIPT:
		icache_vmid = true;
		break;
	default:
	case CTR_L1IP_VIPT:
		icache_aliasing = true;
		break;
	}

	if (dcache_line_size == 0) {
		KASSERT(icache_line_size == 0, ("%s: i-cacheline size set: %ld",
		    __func__, icache_line_size));

		/* Get the D cache line size */
		dcache_line_size = CTR_DLINE_SIZE(ctr);
		/* And the same for the I cache */
		icache_line_size = CTR_ILINE_SIZE(ctr);

		idcache_line_size = MIN(dcache_line_size, icache_line_size);
	}

	if (dcache_line_size != CTR_DLINE_SIZE(ctr)) {
		printf("WARNING: D-cacheline size mismatch %ld != %d\n",
		    dcache_line_size, CTR_DLINE_SIZE(ctr));
	}

	if (icache_line_size != CTR_ILINE_SIZE(ctr)) {
		printf("WARNING: I-cacheline size mismatch %ld != %d\n",
		    icache_line_size, CTR_ILINE_SIZE(ctr));
	}
}
1459 
1460 void
1461 identify_cpu(u_int cpu)
1462 {
1463 	u_int midr;
1464 	u_int impl_id;
1465 	u_int part_id;
1466 	size_t i;
1467 	const struct cpu_parts *cpu_partsp = NULL;
1468 
1469 	midr = get_midr();
1470 
1471 	impl_id = CPU_IMPL(midr);
1472 	for (i = 0; i < nitems(cpu_implementers); i++) {
1473 		if (impl_id == cpu_implementers[i].impl_id ||
1474 		    cpu_implementers[i].impl_id == 0) {
1475 			cpu_desc[cpu].cpu_impl = impl_id;
1476 			cpu_desc[cpu].cpu_impl_name =
1477 			    cpu_implementers[i].impl_name;
1478 			cpu_partsp = cpu_implementers[i].cpu_parts;
1479 			break;
1480 		}
1481 	}
1482 
1483 	part_id = CPU_PART(midr);
1484 	for (i = 0; &cpu_partsp[i] != NULL; i++) {
1485 		if (part_id == cpu_partsp[i].part_id ||
1486 		    cpu_partsp[i].part_id == 0) {
1487 			cpu_desc[cpu].cpu_part_num = part_id;
1488 			cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
1489 			break;
1490 		}
1491 	}
1492 
1493 	cpu_desc[cpu].cpu_revision = CPU_REV(midr);
1494 	cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
1495 
1496 	snprintf(cpu_model, sizeof(cpu_model), "%s %s r%dp%d",
1497 	    cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
1498 	    cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
1499 
1500 	/* Save affinity for current CPU */
1501 	cpu_desc[cpu].mpidr = get_mpidr();
1502 	CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;
1503 
1504 	cpu_desc[cpu].ctr = READ_SPECIALREG(ctr_el0);
1505 	cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
1506 	cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1);
1507 	cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
1508 	cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1);
1509 	cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
1510 	cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
1511 	cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(id_aa64mmfr2_el1);
1512 	cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
1513 	cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1);
1514 }
1515 
/*
 * Compare CPU "cpu"'s registers against CPU 0's: raise cpu_aff_levels
 * to cover the highest MPIDR affinity level that differs, and mark any
 * ID register that differs in cpu_print_regs so print_cpu_features()
 * reports it for this CPU too.
 */
static void
check_cpu_regs(u_int cpu)
{

	/*
	 * The fallthrough is deliberate: once a lower affinity level
	 * differs, the higher levels must still be checked.
	 */
	switch (cpu_aff_levels) {
	case 0:
		if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
		    CPU_AFF0(cpu_desc[0].mpidr))
			cpu_aff_levels = 1;
		/* FALLTHROUGH */
	case 1:
		if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
		    CPU_AFF1(cpu_desc[0].mpidr))
			cpu_aff_levels = 2;
		/* FALLTHROUGH */
	case 2:
		if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
		    CPU_AFF2(cpu_desc[0].mpidr))
			cpu_aff_levels = 3;
		/* FALLTHROUGH */
	case 3:
		if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
		    CPU_AFF3(cpu_desc[0].mpidr))
			cpu_aff_levels = 4;
		break;
	}

	if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
		cpu_print_regs |= PRINT_ID_AA64_AFR0;
	if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
		cpu_print_regs |= PRINT_ID_AA64_AFR1;

	if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
		cpu_print_regs |= PRINT_ID_AA64_DFR0;
	if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
		cpu_print_regs |= PRINT_ID_AA64_DFR1;

	if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
		cpu_print_regs |= PRINT_ID_AA64_ISAR0;
	if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
		cpu_print_regs |= PRINT_ID_AA64_ISAR1;

	if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
		cpu_print_regs |= PRINT_ID_AA64_MMFR0;
	if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
		cpu_print_regs |= PRINT_ID_AA64_MMFR1;
	if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
		cpu_print_regs |= PRINT_ID_AA64_MMFR2;

	if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
		cpu_print_regs |= PRINT_ID_AA64_PFR0;
	if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
		cpu_print_regs |= PRINT_ID_AA64_PFR1;

	if (cpu_desc[cpu].ctr != cpu_desc[0].ctr) {
		/*
		 * If the cache type register is different we may
		 * have a different l1 cache type.
		 */
		identify_cache(cpu_desc[cpu].ctr);
		cpu_print_regs |= PRINT_CTR_EL0;
	}
}
1579