xref: /freebsd/sys/arm64/arm64/identcpu.c (revision 7cc42f6d)
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * Copyright (c) 2014 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Semihalf
7  * under sponsorship of the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/pcpu.h>
38 #include <sys/sbuf.h>
39 #include <sys/smp.h>
40 #include <sys/sysctl.h>
41 #include <sys/systm.h>
42 
43 #include <machine/atomic.h>
44 #include <machine/cpu.h>
45 #include <machine/cpufunc.h>
46 #include <machine/elf.h>
47 #include <machine/md_var.h>
48 #include <machine/undefined.h>
49 
50 static void print_cpu_features(u_int cpu);
51 static u_long parse_cpu_features_hwcap(void);
52 static u_long parse_cpu_features_hwcap2(void);
53 
54 char machine[] = "arm64";
55 
56 #ifdef SCTL_MASK32
57 extern int adaptive_machine_arch;
58 #endif
59 
60 static SYSCTL_NODE(_machdep, OID_AUTO, cache, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
61     "Cache management tuning");
62 
63 static int allow_dic = 1;
64 SYSCTL_INT(_machdep_cache, OID_AUTO, allow_dic, CTLFLAG_RDTUN, &allow_dic, 0,
65     "Allow optimizations based on the DIC cache bit");
66 
67 static int allow_idc = 1;
68 SYSCTL_INT(_machdep_cache, OID_AUTO, allow_idc, CTLFLAG_RDTUN, &allow_idc, 0,
69     "Allow optimizations based on the IDC cache bit");
70 
71 static void check_cpu_regs(u_int cpu);
72 
73 /*
74  * The default implementation of I-cache sync assumes we have an
75  * aliasing cache until we know otherwise.
76  */
77 void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t) =
78     &arm64_aliasing_icache_sync_range;
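/*
 * identify_cpu_sysinit() below switches this pointer to
 * arm64_dic_idc_icache_sync_range() once every CPU has been seen to set
 * both the DIC and IDC bits in CTR_EL0 (and the allow_dic/allow_idc
 * tunables permit it); with both bits set the architecture does not
 * require cache maintenance by address to keep instruction fetches
 * coherent with data writes.
 */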
79 
80 static int
81 sysctl_hw_machine(SYSCTL_HANDLER_ARGS)
82 {
83 #ifdef SCTL_MASK32
84 	static const char machine32[] = "arm";
85 #endif
86 	int error;
87 
88 #ifdef SCTL_MASK32
89 	if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch)
90 		error = SYSCTL_OUT(req, machine32, sizeof(machine32));
91 	else
92 #endif
93 		error = SYSCTL_OUT(req, machine, sizeof(machine));
94 	return (error);
95 }
96 
97 SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD |
98 	CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class");
99 
100 static char cpu_model[64];
101 SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD,
102 	cpu_model, sizeof(cpu_model), "Machine model");
103 
104 /*
105  * Per-CPU affinity as provided in MPIDR_EL1
106  * Indexed by CPU number in logical order selected by the system.
107  * Relevant fields can be extracted using the CPU_AFFn macros;
108  * Aff3.Aff2.Aff1.Aff0 together form a unique CPU address in the system.
109  *
110  * Fields used by us:
111  * Aff1 - Cluster number
112  * Aff0 - CPU number in Aff1 cluster
113  */
114 uint64_t __cpu_affinity[MAXCPU];
115 static u_int cpu_aff_levels;
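/*
 * For example (illustrative values only): a CPU whose MPIDR_EL1 affinity
 * bits decode to Aff1 = 1 and Aff0 = 3 is CPU 3 within cluster 1, and
 * CPU_AFF1()/CPU_AFF0() extract exactly those fields from the value
 * saved here.
 */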
116 
117 struct cpu_desc {
118 	u_int		cpu_impl;
119 	u_int		cpu_part_num;
120 	u_int		cpu_variant;
121 	u_int		cpu_revision;
122 	const char	*cpu_impl_name;
123 	const char	*cpu_part_name;
124 
125 	uint64_t	mpidr;
126 	uint64_t	id_aa64afr0;
127 	uint64_t	id_aa64afr1;
128 	uint64_t	id_aa64dfr0;
129 	uint64_t	id_aa64dfr1;
130 	uint64_t	id_aa64isar0;
131 	uint64_t	id_aa64isar1;
132 	uint64_t	id_aa64mmfr0;
133 	uint64_t	id_aa64mmfr1;
134 	uint64_t	id_aa64mmfr2;
135 	uint64_t	id_aa64pfr0;
136 	uint64_t	id_aa64pfr1;
137 	uint64_t	ctr;
138 };
139 
140 static struct cpu_desc cpu_desc[MAXCPU];
141 static struct cpu_desc kern_cpu_desc;
142 static struct cpu_desc user_cpu_desc;
143 static u_int cpu_print_regs;
144 #define	PRINT_ID_AA64_AFR0	0x00000001
145 #define	PRINT_ID_AA64_AFR1	0x00000002
146 #define	PRINT_ID_AA64_DFR0	0x00000010
147 #define	PRINT_ID_AA64_DFR1	0x00000020
148 #define	PRINT_ID_AA64_ISAR0	0x00000100
149 #define	PRINT_ID_AA64_ISAR1	0x00000200
150 #define	PRINT_ID_AA64_MMFR0	0x00001000
151 #define	PRINT_ID_AA64_MMFR1	0x00002000
152 #define	PRINT_ID_AA64_MMFR2	0x00004000
153 #define	PRINT_ID_AA64_PFR0	0x00010000
154 #define	PRINT_ID_AA64_PFR1	0x00020000
155 #define	PRINT_CTR_EL0		0x10000000
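/*
 * cpu_print_regs collects one PRINT_* bit per ID register; check_cpu_regs()
 * sets a bit when a secondary CPU's value differs from CPU 0, and
 * print_cpu_features() then reports that register for every CPU instead of
 * only for the boot CPU.
 */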
156 
157 struct cpu_parts {
158 	u_int		part_id;
159 	const char	*part_name;
160 };
161 #define	CPU_PART_NONE	{ 0, "Unknown Processor" }
162 
163 struct cpu_implementers {
164 	u_int			impl_id;
165 	const char		*impl_name;
166 	/*
167 	 * Part number is implementation defined
168 	 * so each vendor will have its own set of values and names.
169 	 */
170 	const struct cpu_parts	*cpu_parts;
171 };
172 #define	CPU_IMPLEMENTER_NONE	{ 0, "Unknown Implementer", cpu_parts_none }
173 
174 /*
175  * Per-implementer table of (PartNum, CPU Name) pairs.
176  */
177 /* ARM Ltd. */
178 static const struct cpu_parts cpu_parts_arm[] = {
179 	{ CPU_PART_FOUNDATION, "Foundation-Model" },
180 	{ CPU_PART_CORTEX_A35, "Cortex-A35" },
181 	{ CPU_PART_CORTEX_A53, "Cortex-A53" },
182 	{ CPU_PART_CORTEX_A55, "Cortex-A55" },
183 	{ CPU_PART_CORTEX_A57, "Cortex-A57" },
184 	{ CPU_PART_CORTEX_A65, "Cortex-A65" },
185 	{ CPU_PART_CORTEX_A72, "Cortex-A72" },
186 	{ CPU_PART_CORTEX_A73, "Cortex-A73" },
187 	{ CPU_PART_CORTEX_A75, "Cortex-A75" },
188 	{ CPU_PART_CORTEX_A76, "Cortex-A76" },
189 	{ CPU_PART_CORTEX_A76AE, "Cortex-A76AE" },
190 	{ CPU_PART_CORTEX_A77, "Cortex-A77" },
191 	{ CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
192 	CPU_PART_NONE,
193 };
194 
195 /* Cavium */
196 static const struct cpu_parts cpu_parts_cavium[] = {
197 	{ CPU_PART_THUNDERX, "ThunderX" },
198 	{ CPU_PART_THUNDERX2, "ThunderX2" },
199 	CPU_PART_NONE,
200 };
201 
202 /* APM / Ampere */
203 static const struct cpu_parts cpu_parts_apm[] = {
204 	{ CPU_PART_EMAG8180, "eMAG 8180" },
205 	CPU_PART_NONE,
206 };
207 
208 /* Unknown */
209 static const struct cpu_parts cpu_parts_none[] = {
210 	CPU_PART_NONE,
211 };
212 
213 /*
214  * Implementers table.
215  */
216 const struct cpu_implementers cpu_implementers[] = {
217 	{ CPU_IMPL_ARM,		"ARM",		cpu_parts_arm },
218 	{ CPU_IMPL_BROADCOM,	"Broadcom",	cpu_parts_none },
219 	{ CPU_IMPL_CAVIUM,	"Cavium",	cpu_parts_cavium },
220 	{ CPU_IMPL_DEC,		"DEC",		cpu_parts_none },
221 	{ CPU_IMPL_INFINEON,	"IFX",		cpu_parts_none },
222 	{ CPU_IMPL_FREESCALE,	"Freescale",	cpu_parts_none },
223 	{ CPU_IMPL_NVIDIA,	"NVIDIA",	cpu_parts_none },
224 	{ CPU_IMPL_APM,		"APM",		cpu_parts_apm },
225 	{ CPU_IMPL_QUALCOMM,	"Qualcomm",	cpu_parts_none },
226 	{ CPU_IMPL_MARVELL,	"Marvell",	cpu_parts_none },
227 	{ CPU_IMPL_INTEL,	"Intel",	cpu_parts_none },
228 	CPU_IMPLEMENTER_NONE,
229 };
230 
231 #define	MRS_TYPE_MASK		0xf
232 #define	MRS_INVALID		0
233 #define	MRS_EXACT		1
234 #define	MRS_EXACT_VAL(x)	(MRS_EXACT | ((x) << 4))
235 #define	MRS_EXACT_FIELD(x)	((x) >> 4)
236 #define	MRS_LOWER		2
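/*
 * How a field is folded into the register views built below: MRS_EXACT
 * forces the user-visible field to a fixed value (0 by default, or the
 * value supplied with MRS_EXACT_VAL()), while MRS_LOWER exposes the lowest
 * value observed across all CPUs.  The kernel view always keeps the lowest
 * observed value regardless of type; see update_special_regs().
 */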
237 
238 struct mrs_field_value {
239 	uint64_t	value;
240 	const char	*desc;
241 };
242 
243 #define	MRS_FIELD_VALUE(_value, _desc)					\
244 	{								\
245 		.value = (_value),					\
246 		.desc = (_desc),					\
247 	}
248 
249 #define	MRS_FIELD_VALUE_NONE_IMPL(_reg, _field, _none, _impl)		\
250 	MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _none, ""),		\
251 	MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _impl, #_field)
252 
253 #define	MRS_FIELD_VALUE_COUNT(_reg, _field, _desc)			\
254 	MRS_FIELD_VALUE(0ul << _reg ## _ ## _field ## _SHIFT, "1 " _desc), \
255 	MRS_FIELD_VALUE(1ul << _reg ## _ ## _field ## _SHIFT, "2 " _desc "s"), \
256 	MRS_FIELD_VALUE(2ul << _reg ## _ ## _field ## _SHIFT, "3 " _desc "s"), \
257 	MRS_FIELD_VALUE(3ul << _reg ## _ ## _field ## _SHIFT, "4 " _desc "s"), \
258 	MRS_FIELD_VALUE(4ul << _reg ## _ ## _field ## _SHIFT, "5 " _desc "s"), \
259 	MRS_FIELD_VALUE(5ul << _reg ## _ ## _field ## _SHIFT, "6 " _desc "s"), \
260 	MRS_FIELD_VALUE(6ul << _reg ## _ ## _field ## _SHIFT, "7 " _desc "s"), \
261 	MRS_FIELD_VALUE(7ul << _reg ## _ ## _field ## _SHIFT, "8 " _desc "s"), \
262 	MRS_FIELD_VALUE(8ul << _reg ## _ ## _field ## _SHIFT, "9 " _desc "s"), \
263 	MRS_FIELD_VALUE(9ul << _reg ## _ ## _field ## _SHIFT, "10 "_desc "s"), \
264 	MRS_FIELD_VALUE(10ul<< _reg ## _ ## _field ## _SHIFT, "11 "_desc "s"), \
265 	MRS_FIELD_VALUE(11ul<< _reg ## _ ## _field ## _SHIFT, "12 "_desc "s"), \
266 	MRS_FIELD_VALUE(12ul<< _reg ## _ ## _field ## _SHIFT, "13 "_desc "s"), \
267 	MRS_FIELD_VALUE(13ul<< _reg ## _ ## _field ## _SHIFT, "14 "_desc "s"), \
268 	MRS_FIELD_VALUE(14ul<< _reg ## _ ## _field ## _SHIFT, "15 "_desc "s"), \
269 	MRS_FIELD_VALUE(15ul<< _reg ## _ ## _field ## _SHIFT, "16 "_desc "s")
270 
271 #define	MRS_FIELD_VALUE_END	{ .desc = NULL }
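/*
 * For reference, MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, AES, NONE, BASE)
 * expands (via MRS_FIELD_VALUE()) to the two entries
 *
 *	{ .value = ID_AA64ISAR0_AES_NONE, .desc = "" },
 *	{ .value = ID_AA64ISAR0_AES_BASE, .desc = "AES" },
 *
 * i.e. an empty description when the feature is absent and the field name
 * when it is implemented.
 */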
272 
273 struct mrs_field {
274 	const char	*name;
275 	struct mrs_field_value *values;
276 	uint64_t	mask;
277 	bool		sign;
278 	u_int		type;
279 	u_int		shift;
280 };
281 
282 #define	MRS_FIELD(_register, _name, _sign, _type, _values)		\
283 	{								\
284 		.name = #_name,						\
285 		.sign = (_sign),					\
286 		.type = (_type),					\
287 		.shift = _register ## _ ## _name ## _SHIFT,		\
288 		.mask = _register ## _ ## _name ## _MASK,		\
289 		.values = (_values),					\
290 	}
291 
292 #define	MRS_FIELD_END	{ .type = MRS_INVALID, }
293 
294 /* ID_AA64AFR0_EL1 */
295 static struct mrs_field id_aa64afr0_fields[] = {
296 	MRS_FIELD_END,
297 };
298 
299 
300 /* ID_AA64AFR1_EL1 */
301 static struct mrs_field id_aa64afr1_fields[] = {
302 	MRS_FIELD_END,
303 };
304 
305 
306 /* ID_AA64DFR0_EL1 */
307 static struct mrs_field_value id_aa64dfr0_pmsver[] = {
308 	MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_NONE, ""),
309 	MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_V1, "SPE"),
310 	MRS_FIELD_VALUE_END,
311 };
312 
313 static struct mrs_field_value id_aa64dfr0_ctx_cmps[] = {
314 	MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, CTX_CMPs, "CTX BKPT"),
315 	MRS_FIELD_VALUE_END,
316 };
317 
318 static struct mrs_field_value id_aa64dfr0_wrps[] = {
319 	MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, WRPs, "Watchpoint"),
320 	MRS_FIELD_VALUE_END,
321 };
322 
323 static struct mrs_field_value id_aa64dfr0_brps[] = {
324 	MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, BRPs, "Breakpoint"),
325 	MRS_FIELD_VALUE_END,
326 };
327 
328 static struct mrs_field_value id_aa64dfr0_pmuver[] = {
329 	MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_NONE, ""),
330 	MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3, "PMUv3"),
331 	MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_1, "PMUv3+16 bit evtCount"),
332 	MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_IMPL, "IMPL PMU"),
333 	MRS_FIELD_VALUE_END,
334 };
335 
336 static struct mrs_field_value id_aa64dfr0_tracever[] = {
337 	MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_NONE, ""),
338 	MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_IMPL, "Trace"),
339 	MRS_FIELD_VALUE_END,
340 };
341 
342 static struct mrs_field_value id_aa64dfr0_debugver[] = {
343 	MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8, "Debugv8"),
344 	MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_VHE, "Debugv8_VHE"),
345 	MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_2, "Debugv8.2"),
346 	MRS_FIELD_VALUE_END,
347 };
348 
349 static struct mrs_field id_aa64dfr0_fields[] = {
350 	MRS_FIELD(ID_AA64DFR0, PMSVer, false, MRS_EXACT, id_aa64dfr0_pmsver),
351 	MRS_FIELD(ID_AA64DFR0, CTX_CMPs, false, MRS_EXACT,
352 	    id_aa64dfr0_ctx_cmps),
353 	MRS_FIELD(ID_AA64DFR0, WRPs, false, MRS_EXACT, id_aa64dfr0_wrps),
354 	MRS_FIELD(ID_AA64DFR0, BRPs, false, MRS_LOWER, id_aa64dfr0_brps),
355 	MRS_FIELD(ID_AA64DFR0, PMUVer, false, MRS_EXACT, id_aa64dfr0_pmuver),
356 	MRS_FIELD(ID_AA64DFR0, TraceVer, false, MRS_EXACT,
357 	    id_aa64dfr0_tracever),
358 	MRS_FIELD(ID_AA64DFR0, DebugVer, false, MRS_EXACT_VAL(0x6),
359 	    id_aa64dfr0_debugver),
360 	MRS_FIELD_END,
361 };
362 
363 
364 /* ID_AA64DFR1 */
365 static struct mrs_field id_aa64dfr1_fields[] = {
366 	MRS_FIELD_END,
367 };
368 
369 
370 /* ID_AA64ISAR0_EL1 */
371 static struct mrs_field_value id_aa64isar0_rndr[] = {
372 	MRS_FIELD_VALUE(ID_AA64ISAR0_RNDR_NONE, ""),
373 	MRS_FIELD_VALUE(ID_AA64ISAR0_RNDR_IMPL, "RNG"),
374 	MRS_FIELD_VALUE_END,
375 };
376 
377 static struct mrs_field_value id_aa64isar0_tlb[] = {
378 	MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_NONE, ""),
379 	MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_TLBIOS, "TLBI-OS"),
380 	MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_TLBIOSR, "TLBI-OSR"),
381 	MRS_FIELD_VALUE_END,
382 };
383 
384 static struct mrs_field_value id_aa64isar0_ts[] = {
385 	MRS_FIELD_VALUE(ID_AA64ISAR0_TS_NONE, ""),
386 	MRS_FIELD_VALUE(ID_AA64ISAR0_TS_CondM_8_4, "CondM-8.4"),
387 	MRS_FIELD_VALUE(ID_AA64ISAR0_TS_CondM_8_5, "CondM-8.5"),
388 	MRS_FIELD_VALUE_END,
389 };
390 
391 static struct mrs_field_value id_aa64isar0_fhm[] = {
392 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, FHM, NONE, IMPL),
393 	MRS_FIELD_VALUE_END,
394 };
395 
396 static struct mrs_field_value id_aa64isar0_dp[] = {
397 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, DP, NONE, IMPL),
398 	MRS_FIELD_VALUE_END,
399 };
400 
401 static struct mrs_field_value id_aa64isar0_sm4[] = {
402 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM4, NONE, IMPL),
403 	MRS_FIELD_VALUE_END,
404 };
405 
406 static struct mrs_field_value id_aa64isar0_sm3[] = {
407 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM3, NONE, IMPL),
408 	MRS_FIELD_VALUE_END,
409 };
410 
411 static struct mrs_field_value id_aa64isar0_sha3[] = {
412 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA3, NONE, IMPL),
413 	MRS_FIELD_VALUE_END,
414 };
415 
416 static struct mrs_field_value id_aa64isar0_rdm[] = {
417 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, RDM, NONE, IMPL),
418 	MRS_FIELD_VALUE_END,
419 };
420 
421 static struct mrs_field_value id_aa64isar0_atomic[] = {
422 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, Atomic, NONE, IMPL),
423 	MRS_FIELD_VALUE_END,
424 };
425 
426 static struct mrs_field_value id_aa64isar0_crc32[] = {
427 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, CRC32, NONE, BASE),
428 	MRS_FIELD_VALUE_END,
429 };
430 
431 static struct mrs_field_value id_aa64isar0_sha2[] = {
432 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA2, NONE, BASE),
433 	MRS_FIELD_VALUE(ID_AA64ISAR0_SHA2_512, "SHA2+SHA512"),
434 	MRS_FIELD_VALUE_END,
435 };
436 
437 static struct mrs_field_value id_aa64isar0_sha1[] = {
438 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA1, NONE, BASE),
439 	MRS_FIELD_VALUE_END,
440 };
441 
442 static struct mrs_field_value id_aa64isar0_aes[] = {
443 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, AES, NONE, BASE),
444 	MRS_FIELD_VALUE(ID_AA64ISAR0_AES_PMULL, "AES+PMULL"),
445 	MRS_FIELD_VALUE_END,
446 };
447 
448 static struct mrs_field id_aa64isar0_fields[] = {
449 	MRS_FIELD(ID_AA64ISAR0, RNDR, false, MRS_LOWER, id_aa64isar0_rndr),
450 	MRS_FIELD(ID_AA64ISAR0, TLB, false, MRS_LOWER, id_aa64isar0_tlb),
451 	MRS_FIELD(ID_AA64ISAR0, TS, false, MRS_LOWER, id_aa64isar0_ts),
452 	MRS_FIELD(ID_AA64ISAR0, FHM, false, MRS_LOWER, id_aa64isar0_fhm),
453 	MRS_FIELD(ID_AA64ISAR0, DP, false, MRS_LOWER, id_aa64isar0_dp),
454 	MRS_FIELD(ID_AA64ISAR0, SM4, false, MRS_LOWER, id_aa64isar0_sm4),
455 	MRS_FIELD(ID_AA64ISAR0, SM3, false, MRS_LOWER, id_aa64isar0_sm3),
456 	MRS_FIELD(ID_AA64ISAR0, SHA3, false, MRS_LOWER, id_aa64isar0_sha3),
457 	MRS_FIELD(ID_AA64ISAR0, RDM, false, MRS_LOWER, id_aa64isar0_rdm),
458 	MRS_FIELD(ID_AA64ISAR0, Atomic, false, MRS_LOWER, id_aa64isar0_atomic),
459 	MRS_FIELD(ID_AA64ISAR0, CRC32, false, MRS_LOWER, id_aa64isar0_crc32),
460 	MRS_FIELD(ID_AA64ISAR0, SHA2, false, MRS_LOWER, id_aa64isar0_sha2),
461 	MRS_FIELD(ID_AA64ISAR0, SHA1, false, MRS_LOWER, id_aa64isar0_sha1),
462 	MRS_FIELD(ID_AA64ISAR0, AES, false, MRS_LOWER, id_aa64isar0_aes),
463 	MRS_FIELD_END,
464 };
465 
466 
467 /* ID_AA64ISAR1_EL1 */
468 static struct mrs_field_value id_aa64isar1_i8mm[] = {
469 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, I8MM, NONE, IMPL),
470 	MRS_FIELD_VALUE_END,
471 };
472 
473 static struct mrs_field_value id_aa64isar1_dgh[] = {
474 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, DGH, NONE, IMPL),
475 	MRS_FIELD_VALUE_END,
476 };
477 
478 static struct mrs_field_value id_aa64isar1_bf16[] = {
479 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, BF16, NONE, IMPL),
480 	MRS_FIELD_VALUE_END,
481 };
482 
483 static struct mrs_field_value id_aa64isar1_specres[] = {
484 	MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_NONE, ""),
485 	MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_IMPL, "PredInv"),
486 	MRS_FIELD_VALUE_END,
487 };
488 
489 static struct mrs_field_value id_aa64isar1_sb[] = {
490 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, SB, NONE, IMPL),
491 	MRS_FIELD_VALUE_END,
492 };
493 
494 static struct mrs_field_value id_aa64isar1_frintts[] = {
495 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FRINTTS, NONE, IMPL),
496 	MRS_FIELD_VALUE_END,
497 };
498 
499 static struct mrs_field_value id_aa64isar1_gpi[] = {
500 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPI, NONE, IMPL),
501 	MRS_FIELD_VALUE_END,
502 };
503 
504 static struct mrs_field_value id_aa64isar1_gpa[] = {
505 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL),
506 	MRS_FIELD_VALUE_END,
507 };
508 
509 static struct mrs_field_value id_aa64isar1_lrcpc[] = {
510 	MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_NONE, ""),
511 	MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_RCPC_8_3, "RCPC-8.3"),
512 	MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_RCPC_8_4, "RCPC-8.4"),
513 	MRS_FIELD_VALUE_END,
514 };
515 
516 static struct mrs_field_value id_aa64isar1_fcma[] = {
517 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FCMA, NONE, IMPL),
518 	MRS_FIELD_VALUE_END,
519 };
520 
521 static struct mrs_field_value id_aa64isar1_jscvt[] = {
522 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, JSCVT, NONE, IMPL),
523 	MRS_FIELD_VALUE_END,
524 };
525 
526 static struct mrs_field_value id_aa64isar1_api[] = {
527 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, API, NONE, IMPL),
528 	MRS_FIELD_VALUE_END,
529 };
530 
531 static struct mrs_field_value id_aa64isar1_apa[] = {
532 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, APA, NONE, IMPL),
533 	MRS_FIELD_VALUE_END,
534 };
535 
536 static struct mrs_field_value id_aa64isar1_dpb[] = {
537 	MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_NONE, ""),
538 	MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_DCCVAP, "DCPoP"),
539 	MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_DCCVADP, "DCCVADP"),
540 	MRS_FIELD_VALUE_END,
541 };
542 
543 static struct mrs_field id_aa64isar1_fields[] = {
544 	MRS_FIELD(ID_AA64ISAR1, I8MM, false, MRS_LOWER, id_aa64isar1_i8mm),
545 	MRS_FIELD(ID_AA64ISAR1, DGH, false, MRS_LOWER, id_aa64isar1_dgh),
546 	MRS_FIELD(ID_AA64ISAR1, BF16, false, MRS_LOWER, id_aa64isar1_bf16),
547 	MRS_FIELD(ID_AA64ISAR1, SPECRES, false, MRS_LOWER,
548 	    id_aa64isar1_specres),
549 	MRS_FIELD(ID_AA64ISAR1, SB, false, MRS_LOWER, id_aa64isar1_sb),
550 	MRS_FIELD(ID_AA64ISAR1, FRINTTS, false, MRS_LOWER,
551 	    id_aa64isar1_frintts),
552 	MRS_FIELD(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi),
553 	MRS_FIELD(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa),
554 	MRS_FIELD(ID_AA64ISAR1, LRCPC, false, MRS_LOWER, id_aa64isar1_lrcpc),
555 	MRS_FIELD(ID_AA64ISAR1, FCMA, false, MRS_LOWER, id_aa64isar1_fcma),
556 	MRS_FIELD(ID_AA64ISAR1, JSCVT, false, MRS_LOWER, id_aa64isar1_jscvt),
557 	MRS_FIELD(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api),
558 	MRS_FIELD(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa),
559 	MRS_FIELD(ID_AA64ISAR1, DPB, false, MRS_LOWER, id_aa64isar1_dpb),
560 	MRS_FIELD_END,
561 };
562 
563 
564 /* ID_AA64MMFR0_EL1 */
565 static struct mrs_field_value id_aa64mmfr0_tgran4[] = {
566 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran4, NONE, IMPL),
567 	MRS_FIELD_VALUE_END,
568 };
569 
570 static struct mrs_field_value id_aa64mmfr0_tgran64[] = {
571 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran64, NONE, IMPL),
572 	MRS_FIELD_VALUE_END,
573 };
574 
575 static struct mrs_field_value id_aa64mmfr0_tgran16[] = {
576 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran16, NONE, IMPL),
577 	MRS_FIELD_VALUE_END,
578 };
579 
580 static struct mrs_field_value id_aa64mmfr0_bigend_el0[] = {
581 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEndEL0, FIXED, MIXED),
582 	MRS_FIELD_VALUE_END,
583 };
584 
585 static struct mrs_field_value id_aa64mmfr0_snsmem[] = {
586 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, SNSMem, NONE, DISTINCT),
587 	MRS_FIELD_VALUE_END,
588 };
589 
590 static struct mrs_field_value id_aa64mmfr0_bigend[] = {
591 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEnd, FIXED, MIXED),
592 	MRS_FIELD_VALUE_END,
593 };
594 
595 static struct mrs_field_value id_aa64mmfr0_asid_bits[] = {
596 	MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_8, "8bit ASID"),
597 	MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_16, "16bit ASID"),
598 	MRS_FIELD_VALUE_END,
599 };
600 
601 static struct mrs_field_value id_aa64mmfr0_parange[] = {
602 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4G, "4GB PA"),
603 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_64G, "64GB PA"),
604 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_1T, "1TB PA"),
605 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4T, "4TB PA"),
606 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_16T, "16TB PA"),
607 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_256T, "256TB PA"),
608 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4P, "4PB PA"),
609 	MRS_FIELD_VALUE_END,
610 };
611 
612 static struct mrs_field id_aa64mmfr0_fields[] = {
613 	MRS_FIELD(ID_AA64MMFR0, TGran4, false, MRS_EXACT, id_aa64mmfr0_tgran4),
614 	MRS_FIELD(ID_AA64MMFR0, TGran64, false, MRS_EXACT,
615 	    id_aa64mmfr0_tgran64),
616 	MRS_FIELD(ID_AA64MMFR0, TGran16, false, MRS_EXACT,
617 	    id_aa64mmfr0_tgran16),
618 	MRS_FIELD(ID_AA64MMFR0, BigEndEL0, false, MRS_EXACT,
619 	    id_aa64mmfr0_bigend_el0),
620 	MRS_FIELD(ID_AA64MMFR0, SNSMem, false, MRS_EXACT, id_aa64mmfr0_snsmem),
621 	MRS_FIELD(ID_AA64MMFR0, BigEnd, false, MRS_EXACT, id_aa64mmfr0_bigend),
622 	MRS_FIELD(ID_AA64MMFR0, ASIDBits, false, MRS_EXACT,
623 	    id_aa64mmfr0_asid_bits),
624 	MRS_FIELD(ID_AA64MMFR0, PARange, false, MRS_EXACT,
625 	    id_aa64mmfr0_parange),
626 	MRS_FIELD_END,
627 };
628 
629 
630 /* ID_AA64MMFR1_EL1 */
631 static struct mrs_field_value id_aa64mmfr1_xnx[] = {
632 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, XNX, NONE, IMPL),
633 	MRS_FIELD_VALUE_END,
634 };
635 
636 static struct mrs_field_value id_aa64mmfr1_specsei[] = {
637 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, SpecSEI, NONE, IMPL),
638 	MRS_FIELD_VALUE_END,
639 };
640 
641 static struct mrs_field_value id_aa64mmfr1_pan[] = {
642 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, PAN, NONE, IMPL),
643 	MRS_FIELD_VALUE(ID_AA64MMFR1_PAN_ATS1E1, "PAN+ATS1E1"),
644 	MRS_FIELD_VALUE_END,
645 };
646 
647 static struct mrs_field_value id_aa64mmfr1_lo[] = {
648 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, LO, NONE, IMPL),
649 	MRS_FIELD_VALUE_END,
650 };
651 
652 static struct mrs_field_value id_aa64mmfr1_hpds[] = {
653 	MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_NONE, ""),
654 	MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_HPD, "HPD"),
655 	MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_TTPBHA, "HPD+TTPBHA"),
656 	MRS_FIELD_VALUE_END,
657 };
658 
659 static struct mrs_field_value id_aa64mmfr1_vh[] = {
660 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, VH, NONE, IMPL),
661 	MRS_FIELD_VALUE_END,
662 };
663 
664 static struct mrs_field_value id_aa64mmfr1_vmidbits[] = {
665 	MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_8, "8bit VMID"),
666 	MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_16, "16bit VMID"),
667 	MRS_FIELD_VALUE_END,
668 };
669 
670 static struct mrs_field_value id_aa64mmfr1_hafdbs[] = {
671 	MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_NONE, ""),
672 	MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF, "HAF"),
673 	MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF_DBS, "HAF+DS"),
674 	MRS_FIELD_VALUE_END,
675 };
676 
677 static struct mrs_field id_aa64mmfr1_fields[] = {
678 	MRS_FIELD(ID_AA64MMFR1, XNX, false, MRS_EXACT, id_aa64mmfr1_xnx),
679 	MRS_FIELD(ID_AA64MMFR1, SpecSEI, false, MRS_EXACT,
680 	    id_aa64mmfr1_specsei),
681 	MRS_FIELD(ID_AA64MMFR1, PAN, false, MRS_EXACT, id_aa64mmfr1_pan),
682 	MRS_FIELD(ID_AA64MMFR1, LO, false, MRS_EXACT, id_aa64mmfr1_lo),
683 	MRS_FIELD(ID_AA64MMFR1, HPDS, false, MRS_EXACT, id_aa64mmfr1_hpds),
684 	MRS_FIELD(ID_AA64MMFR1, VH, false, MRS_EXACT, id_aa64mmfr1_vh),
685 	MRS_FIELD(ID_AA64MMFR1, VMIDBits, false, MRS_EXACT,
686 	    id_aa64mmfr1_vmidbits),
687 	MRS_FIELD(ID_AA64MMFR1, HAFDBS, false, MRS_EXACT, id_aa64mmfr1_hafdbs),
688 	MRS_FIELD_END,
689 };
690 
691 
692 /* ID_AA64MMFR2_EL1 */
693 static struct mrs_field_value id_aa64mmfr2_nv[] = {
694 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, NV, NONE, IMPL),
695 	MRS_FIELD_VALUE_END,
696 };
697 
698 static struct mrs_field_value id_aa64mmfr2_ccidx[] = {
699 	MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_32, "32bit CCIDX"),
700 	MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_64, "64bit CCIDX"),
701 	MRS_FIELD_VALUE_END,
702 };
703 
704 static struct mrs_field_value id_aa64mmfr2_varange[] = {
705 	MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_48, "48bit VA"),
706 	MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_52, "52bit VA"),
707 	MRS_FIELD_VALUE_END,
708 };
709 
710 static struct mrs_field_value id_aa64mmfr2_iesb[] = {
711 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, IESB, NONE, IMPL),
712 	MRS_FIELD_VALUE_END,
713 };
714 
715 static struct mrs_field_value id_aa64mmfr2_lsm[] = {
716 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, LSM, NONE, IMPL),
717 	MRS_FIELD_VALUE_END,
718 };
719 
720 static struct mrs_field_value id_aa64mmfr2_uao[] = {
721 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, UAO, NONE, IMPL),
722 	MRS_FIELD_VALUE_END,
723 };
724 
725 static struct mrs_field_value id_aa64mmfr2_cnp[] = {
726 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, CnP, NONE, IMPL),
727 	MRS_FIELD_VALUE_END,
728 };
729 
730 static struct mrs_field id_aa64mmfr2_fields[] = {
731 	MRS_FIELD(ID_AA64MMFR2, NV, false, MRS_EXACT, id_aa64mmfr2_nv),
732 	MRS_FIELD(ID_AA64MMFR2, CCIDX, false, MRS_EXACT, id_aa64mmfr2_ccidx),
733 	MRS_FIELD(ID_AA64MMFR2, VARange, false, MRS_EXACT,
734 	    id_aa64mmfr2_varange),
735 	MRS_FIELD(ID_AA64MMFR2, IESB, false, MRS_EXACT, id_aa64mmfr2_iesb),
736 	MRS_FIELD(ID_AA64MMFR2, LSM, false, MRS_EXACT, id_aa64mmfr2_lsm),
737 	MRS_FIELD(ID_AA64MMFR2, UAO, false, MRS_EXACT, id_aa64mmfr2_uao),
738 	MRS_FIELD(ID_AA64MMFR2, CnP, false, MRS_EXACT, id_aa64mmfr2_cnp),
739 	MRS_FIELD_END,
740 };
741 
742 
743 /* ID_AA64PFR0_EL1 */
744 static struct mrs_field_value id_aa64pfr0_csv3[] = {
745 	MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_NONE, ""),
746 	MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_ISOLATED, "CSV3"),
747 	MRS_FIELD_VALUE_END,
748 };
749 
750 static struct mrs_field_value id_aa64pfr0_csv2[] = {
751 	MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_NONE, ""),
752 	MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_ISOLATED, "CSV2"),
753 	MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_SCXTNUM, "SCXTNUM"),
754 	MRS_FIELD_VALUE_END,
755 };
756 
757 static struct mrs_field_value id_aa64pfr0_dit[] = {
758 	MRS_FIELD_VALUE(ID_AA64PFR0_DIT_NONE, ""),
759 	MRS_FIELD_VALUE(ID_AA64PFR0_DIT_PSTATE, "PSTATE.DIT"),
760 	MRS_FIELD_VALUE_END,
761 };
762 
763 static struct mrs_field_value id_aa64pfr0_amu[] = {
764 	MRS_FIELD_VALUE(ID_AA64PFR0_AMU_NONE, ""),
765 	MRS_FIELD_VALUE(ID_AA64PFR0_AMU_V1, "AMUv1"),
766 	MRS_FIELD_VALUE_END,
767 };
768 
769 static struct mrs_field_value id_aa64pfr0_mpam[] = {
770 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, MPAM, NONE, IMPL),
771 	MRS_FIELD_VALUE_END,
772 };
773 
774 static struct mrs_field_value id_aa64pfr0_sel2[] = {
775 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SEL2, NONE, IMPL),
776 	MRS_FIELD_VALUE_END,
777 };
778 
779 static struct mrs_field_value id_aa64pfr0_sve[] = {
780 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SVE, NONE, IMPL),
781 	MRS_FIELD_VALUE_END,
782 };
783 
784 static struct mrs_field_value id_aa64pfr0_ras[] = {
785 	MRS_FIELD_VALUE(ID_AA64PFR0_RAS_NONE, ""),
786 	MRS_FIELD_VALUE(ID_AA64PFR0_RAS_V1, "RASv1"),
787 	MRS_FIELD_VALUE_END,
788 };
789 
790 static struct mrs_field_value id_aa64pfr0_gic[] = {
791 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, GIC, CPUIF_NONE, CPUIF_EN),
792 	MRS_FIELD_VALUE_END,
793 };
794 
795 static struct mrs_field_value id_aa64pfr0_advsimd[] = {
796 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, AdvSIMD, NONE, IMPL),
797 	MRS_FIELD_VALUE(ID_AA64PFR0_AdvSIMD_HP, "AdvSIMD+HP"),
798 	MRS_FIELD_VALUE_END,
799 };
800 
801 static struct mrs_field_value id_aa64pfr0_fp[] = {
802 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, FP, NONE, IMPL),
803 	MRS_FIELD_VALUE(ID_AA64PFR0_FP_HP, "FP+HP"),
804 	MRS_FIELD_VALUE_END,
805 };
806 
807 static struct mrs_field_value id_aa64pfr0_el3[] = {
808 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL3, NONE, 64),
809 	MRS_FIELD_VALUE(ID_AA64PFR0_EL3_64_32, "EL3 32"),
810 	MRS_FIELD_VALUE_END,
811 };
812 
813 static struct mrs_field_value id_aa64pfr0_el2[] = {
814 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL2, NONE, 64),
815 	MRS_FIELD_VALUE(ID_AA64PFR0_EL2_64_32, "EL2 32"),
816 	MRS_FIELD_VALUE_END,
817 };
818 
819 static struct mrs_field_value id_aa64pfr0_el1[] = {
820 	MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64, "EL1"),
821 	MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64_32, "EL1 32"),
822 	MRS_FIELD_VALUE_END,
823 };
824 
825 static struct mrs_field_value id_aa64pfr0_el0[] = {
826 	MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64, "EL0"),
827 	MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64_32, "EL0 32"),
828 	MRS_FIELD_VALUE_END,
829 };
830 
831 static struct mrs_field id_aa64pfr0_fields[] = {
832 	MRS_FIELD(ID_AA64PFR0, CSV3, false, MRS_EXACT, id_aa64pfr0_csv3),
833 	MRS_FIELD(ID_AA64PFR0, CSV2, false, MRS_EXACT, id_aa64pfr0_csv2),
834 	MRS_FIELD(ID_AA64PFR0, DIT, false, MRS_EXACT, id_aa64pfr0_dit),
835 	MRS_FIELD(ID_AA64PFR0, AMU, false, MRS_EXACT, id_aa64pfr0_amu),
836 	MRS_FIELD(ID_AA64PFR0, MPAM, false, MRS_EXACT, id_aa64pfr0_mpam),
837 	MRS_FIELD(ID_AA64PFR0, SEL2, false, MRS_EXACT, id_aa64pfr0_sel2),
838 	MRS_FIELD(ID_AA64PFR0, SVE, false, MRS_EXACT, id_aa64pfr0_sve),
839 	MRS_FIELD(ID_AA64PFR0, RAS, false, MRS_EXACT, id_aa64pfr0_ras),
840 	MRS_FIELD(ID_AA64PFR0, GIC, false, MRS_EXACT, id_aa64pfr0_gic),
841 	MRS_FIELD(ID_AA64PFR0, AdvSIMD, true, MRS_LOWER, id_aa64pfr0_advsimd),
842 	MRS_FIELD(ID_AA64PFR0, FP, true,  MRS_LOWER, id_aa64pfr0_fp),
843 	MRS_FIELD(ID_AA64PFR0, EL3, false, MRS_EXACT, id_aa64pfr0_el3),
844 	MRS_FIELD(ID_AA64PFR0, EL2, false, MRS_EXACT, id_aa64pfr0_el2),
845 	MRS_FIELD(ID_AA64PFR0, EL1, false, MRS_LOWER, id_aa64pfr0_el1),
846 	MRS_FIELD(ID_AA64PFR0, EL0, false, MRS_LOWER, id_aa64pfr0_el0),
847 	MRS_FIELD_END,
848 };
849 
850 
851 /* ID_AA64PFR1_EL1 */
852 static struct mrs_field_value id_aa64pfr1_bt[] = {
853 	MRS_FIELD_VALUE(ID_AA64PFR1_BT_NONE, ""),
854 	MRS_FIELD_VALUE(ID_AA64PFR1_BT_IMPL, "BTI"),
855 	MRS_FIELD_VALUE_END,
856 };
857 
858 static struct mrs_field_value id_aa64pfr1_ssbs[] = {
859 	MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_NONE, ""),
860 	MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE, "PSTATE.SSBS"),
861 	MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE_MSR, "PSTATE.SSBS MSR"),
862 	MRS_FIELD_VALUE_END,
863 };
864 
865 static struct mrs_field_value id_aa64pfr1_mte[] = {
866 	MRS_FIELD_VALUE(ID_AA64PFR1_MTE_NONE, ""),
867 	MRS_FIELD_VALUE(ID_AA64PFR1_MTE_IMPL_EL0, "MTE EL0"),
868 	MRS_FIELD_VALUE(ID_AA64PFR1_MTE_IMPL, "MTE"),
869 	MRS_FIELD_VALUE_END,
870 };
871 
872 static struct mrs_field id_aa64pfr1_fields[] = {
873 	MRS_FIELD(ID_AA64PFR1, BT, false, MRS_EXACT, id_aa64pfr1_bt),
874 	MRS_FIELD(ID_AA64PFR1, SSBS, false, MRS_LOWER, id_aa64pfr1_ssbs),
875 	MRS_FIELD(ID_AA64PFR1, MTE, false, MRS_EXACT, id_aa64pfr1_mte),
876 	MRS_FIELD_END,
877 };
878 
879 struct mrs_user_reg {
880 	u_int		reg;
881 	u_int		CRm;
882 	u_int		Op2;
883 	size_t		offset;
884 	struct mrs_field *fields;
885 };
886 
887 static struct mrs_user_reg user_regs[] = {
888 	{	/* id_aa64isar0_el1 */
889 		.reg = ID_AA64ISAR0_EL1,
890 		.CRm = 6,
891 		.Op2 = 0,
892 		.offset = __offsetof(struct cpu_desc, id_aa64isar0),
893 		.fields = id_aa64isar0_fields,
894 	},
895 	{	/* id_aa64isar1_el1 */
896 		.reg = ID_AA64ISAR1_EL1,
897 		.CRm = 6,
898 		.Op2 = 1,
899 		.offset = __offsetof(struct cpu_desc, id_aa64isar1),
900 		.fields = id_aa64isar1_fields,
901 	},
902 	{	/* id_aa64pfr0_el1 */
903 		.reg = ID_AA64PFR0_EL1,
904 		.CRm = 4,
905 		.Op2 = 0,
906 		.offset = __offsetof(struct cpu_desc, id_aa64pfr0),
907 		.fields = id_aa64pfr0_fields,
908 	},
909 	{	/* id_aa64pfr1_el1 */
910 		.reg = ID_AA64PFR1_EL1,
911 		.CRm = 4,
912 		.Op2 = 1,
913 		.offset = __offsetof(struct cpu_desc, id_aa64pfr1),
914 		.fields = id_aa64pfr1_fields,
915 	},
916 	{	/* id_aa64dfr0_el1 */
917 		.reg = ID_AA64DFR0_EL1,
918 		.CRm = 5,
919 		.Op2 = 0,
920 		.offset = __offsetof(struct cpu_desc, id_aa64dfr0),
921 		.fields = id_aa64dfr0_fields,
922 	},
923 	{	/* id_aa64mmfr0_el1 */
924 		.reg = ID_AA64MMFR0_EL1,
925 		.CRm = 7,
926 		.Op2 = 0,
927 		.offset = __offsetof(struct cpu_desc, id_aa64mmfr0),
928 		.fields = id_aa64mmfr0_fields,
929 	},
930 };
931 
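/*
 * CPU_DESC_FIELD() uses the byte offset recorded in user_regs[] to address
 * the matching uint64_t member of a struct cpu_desc, so a single index
 * selects both the description of a register and its cached value.
 */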
932 #define	CPU_DESC_FIELD(desc, idx)					\
933     *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset)
934 
935 static int
936 user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
937     uint32_t esr)
938 {
939 	uint64_t value;
940 	int CRm, Op2, i, reg;
941 
942 	if ((insn & MRS_MASK) != MRS_VALUE)
943 		return (0);
944 
945 	/*
946 	 * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
947 	 * These are in the EL1 CPU identification space.
948 	 * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVIDR_EL1.
949 	 * CRm == {4-7} holds the ID_AA64 registers.
950 	 *
951 	 * For full details see the ARMv8 ARM (ARM DDI 0487C.a)
952 	 * Table D9-2 System instruction encodings for non-Debug System
953 	 * register accesses.
954 	 */
955 	if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
956 		return (0);
957 
958 	CRm = mrs_CRm(insn);
959 	if (CRm > 7 || (CRm < 4 && CRm != 0))
960 		return (0);
961 
962 	Op2 = mrs_Op2(insn);
963 	value = 0;
964 
965 	for (i = 0; i < nitems(user_regs); i++) {
966 		if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
967 			value = CPU_DESC_FIELD(user_cpu_desc, i);
968 			break;
969 		}
970 	}
971 
972 	if (CRm == 0) {
973 		switch (Op2) {
974 		case 0:
975 			value = READ_SPECIALREG(midr_el1);
976 			break;
977 		case 5:
978 			value = READ_SPECIALREG(mpidr_el1);
979 			break;
980 		case 6:
981 			value = READ_SPECIALREG(revidr_el1);
982 			break;
983 		default:
984 			return (0);
985 		}
986 	}
987 
988 	/*
989 	 * We will handle this instruction, move to the next so we
990 	 * don't trap here again.
991 	 */
992 	frame->tf_elr += INSN_SIZE;
993 
994 	reg = MRS_REGISTER(insn);
995 	/* If reg is 31 then write to xzr, i.e. do nothing */
996 	if (reg == 31)
997 		return (1);
998 
999 	if (reg < nitems(frame->tf_x))
1000 		frame->tf_x[reg] = value;
1001 	else if (reg == 30)
1002 		frame->tf_lr = value;
1003 
1004 	return (1);
1005 }
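
/*
 * A minimal sketch of the userspace access this handler emulates (the
 * variable name is arbitrary): reading an ID register from EL0 traps as an
 * undefined instruction and is satisfied from user_cpu_desc, e.g.
 *
 *	uint64_t isar0;
 *	__asm __volatile("mrs %0, id_aa64isar0_el1" : "=r" (isar0));
 */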
1006 
1007 bool
1008 extract_user_id_field(u_int reg, u_int field_shift, uint8_t *val)
1009 {
1010 	uint64_t value;
1011 	int i;
1012 
1013 	for (i = 0; i < nitems(user_regs); i++) {
1014 		if (user_regs[i].reg == reg) {
1015 			value = CPU_DESC_FIELD(user_cpu_desc, i);
1016 			*val = value >> field_shift;
1017 			return (true);
1018 		}
1019 	}
1020 
1021 	return (false);
1022 }
1023 
1024 bool
1025 get_kernel_reg(u_int reg, uint64_t *val)
1026 {
1027 	int i;
1028 
1029 	for (i = 0; i < nitems(user_regs); i++) {
1030 		if (user_regs[i].reg == reg) {
1031 			*val = CPU_DESC_FIELD(kern_cpu_desc, i);
1032 			return (true);
1033 		}
1034 	}
1035 
1036 	return (false);
1037 }
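
/*
 * Sketch of how other kernel code can use get_kernel_reg() to test the
 * system-wide lowest-common value of an ID register:
 *
 *	uint64_t isar0;
 *
 *	if (get_kernel_reg(ID_AA64ISAR0_EL1, &isar0) &&
 *	    ID_AA64ISAR0_CRC32_VAL(isar0) == ID_AA64ISAR0_CRC32_BASE)
 *		... CRC32 instructions are usable on every CPU ...
 */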
1038 
1039 static uint64_t
1040 update_lower_register(uint64_t val, uint64_t new_val, u_int shift,
1041     int width, bool sign)
1042 {
1043 	uint64_t mask;
1044 	uint64_t new_field, old_field;
1045 	bool update;
1046 
1047 	KASSERT(width > 0 && width < 64, ("%s: Invalid width %d", __func__,
1048 	    width));
1049 
1050 	mask = (1ul << width) - 1;
1051 	new_field = (new_val >> shift) & mask;
1052 	old_field = (val >> shift) & mask;
1053 
1054 	update = false;
1055 	if (sign) {
1056 		/*
1057 		 * The field is signed. Toggle the upper bit so the comparison
1058 		 * works on unsigned values: this makes positive numbers, i.e.
1059 		 * those with the top bit clear, larger than negative numbers,
1060 		 * i.e. those with the top bit set, in an unsigned comparison.
1061 		 */
1062 		if ((new_field ^ (1ul << (width - 1))) <
1063 		    (old_field ^ (1ul << (width - 1))))
1064 			update = true;
1065 	} else {
1066 		if (new_field < old_field)
1067 			update = true;
1068 	}
1069 
1070 	if (update) {
1071 		val &= ~(mask << shift);
1072 		val |= new_field << shift;
1073 	}
1074 
1075 	return (val);
1076 }
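
/*
 * Worked example: with width 4 and sign set, a new field of 0xf (-1)
 * compared against an old field of 0x0 becomes 0x7 vs 0x8 once the top
 * bit is toggled, so the new value is taken and the field is lowered to
 * 0xf, matching the signed ordering -1 < 0.
 */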
1077 
1078 void
1079 update_special_regs(u_int cpu)
1080 {
1081 	struct mrs_field *fields;
1082 	uint64_t user_reg, kern_reg, value;
1083 	int i, j;
1084 
1085 	if (cpu == 0) {
1086 		/* Create a user visible cpu description with safe values */
1087 		memset(&user_cpu_desc, 0, sizeof(user_cpu_desc));
1088 		/* Safe values for these registers */
1089 		user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_AdvSIMD_NONE |
1090 		    ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 |
1091 		    ID_AA64PFR0_EL0_64;
1092 		user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DebugVer_8;
1093 	}
1094 
1095 	for (i = 0; i < nitems(user_regs); i++) {
1096 		value = CPU_DESC_FIELD(cpu_desc[cpu], i);
1097 		if (cpu == 0) {
1098 			kern_reg = value;
1099 			user_reg = value;
1100 		} else {
1101 			kern_reg = CPU_DESC_FIELD(kern_cpu_desc, i);
1102 			user_reg = CPU_DESC_FIELD(user_cpu_desc, i);
1103 		}
1104 
1105 		fields = user_regs[i].fields;
1106 		for (j = 0; fields[j].type != 0; j++) {
1107 			switch (fields[j].type & MRS_TYPE_MASK) {
1108 			case MRS_EXACT:
1109 				user_reg &= ~(0xful << fields[j].shift);
1110 				user_reg |=
1111 				    (uint64_t)MRS_EXACT_FIELD(fields[j].type) <<
1112 				    fields[j].shift;
1113 				break;
1114 			case MRS_LOWER:
1115 				user_reg = update_lower_register(user_reg,
1116 				    value, fields[j].shift, 4, fields[j].sign);
1117 				break;
1118 			default:
1119 				panic("Invalid field type: %d", fields[j].type);
1120 			}
1121 			kern_reg = update_lower_register(kern_reg, value,
1122 			    fields[j].shift, 4, fields[j].sign);
1123 		}
1124 
1125 		CPU_DESC_FIELD(kern_cpu_desc, i) = kern_reg;
1126 		CPU_DESC_FIELD(user_cpu_desc, i) = user_reg;
1127 	}
1128 }
1129 
1130 /* HWCAP */
1131 bool __read_frequently lse_supported = false;
1132 
1133 bool __read_frequently icache_aliasing = false;
1134 bool __read_frequently icache_vmid = false;
1135 
1136 int64_t dcache_line_size;	/* The minimum D cache line size */
1137 int64_t icache_line_size;	/* The minimum I cache line size */
1138 int64_t idcache_line_size;	/* The minimum cache line size */
1139 
1140 static void
1141 identify_cpu_sysinit(void *dummy __unused)
1142 {
1143 	int cpu;
1144 	bool dic, idc;
1145 
1146 	dic = (allow_dic != 0);
1147 	idc = (allow_idc != 0);
1148 
1149 	CPU_FOREACH(cpu) {
1150 		check_cpu_regs(cpu);
1151 		if (cpu != 0)
1152 			update_special_regs(cpu);
1153 
1154 		if (CTR_DIC_VAL(cpu_desc[cpu].ctr) == 0)
1155 			dic = false;
1156 		if (CTR_IDC_VAL(cpu_desc[cpu].ctr) == 0)
1157 			idc = false;
1158 	}
1159 
1160 	/* Exposed to userspace as AT_HWCAP and AT_HWCAP2 */
1161 	elf_hwcap = parse_cpu_features_hwcap();
1162 	elf_hwcap2 = parse_cpu_features_hwcap2();
1163 
1164 	if (dic && idc) {
1165 		arm64_icache_sync_range = &arm64_dic_idc_icache_sync_range;
1166 		if (bootverbose)
1167 			printf("Enabling DIC & IDC ICache sync\n");
1168 	}
1169 
1170 	if ((elf_hwcap & HWCAP_ATOMICS) != 0) {
1171 		lse_supported = true;
1172 		if (bootverbose)
1173 			printf("Enabling LSE atomics in the kernel\n");
1174 	}
1175 #ifdef LSE_ATOMICS
1176 	if (!lse_supported)
1177 		panic("CPU does not support LSE atomic instructions");
1178 #endif
1179 
1180 	install_undef_handler(true, user_mrs_handler);
1181 }
1182 SYSINIT(identify_cpu, SI_SUB_CPU, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
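/*
 * The merged register views and HWCAPs are settled here at SI_SUB_CPU;
 * the per-CPU feature lines are printed later, at SI_SUB_SMP, by
 * cpu_features_sysinit() below.
 */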
1183 
1184 static void
1185 cpu_features_sysinit(void *dummy __unused)
1186 {
1187 	u_int cpu;
1188 
1189 	CPU_FOREACH(cpu)
1190 		print_cpu_features(cpu);
1191 }
1192 SYSINIT(cpu_features, SI_SUB_SMP, SI_ORDER_ANY, cpu_features_sysinit, NULL);
1193 
1194 static u_long
1195 parse_cpu_features_hwcap(void)
1196 {
1197 	u_long hwcap = 0;
1198 
1199 	switch (ID_AA64ISAR0_TS_VAL(user_cpu_desc.id_aa64isar0)) {
1200 	case ID_AA64ISAR0_TS_CondM_8_4:
1201 	case ID_AA64ISAR0_TS_CondM_8_5:
1202 		hwcap |= HWCAP_FLAGM;
1203 		break;
1204 	default:
1205 		break;
1206 	}
1207 
1208 	if (ID_AA64ISAR0_DP_VAL(user_cpu_desc.id_aa64isar0) ==
1209 	    ID_AA64ISAR0_DP_IMPL)
1210 		hwcap |= HWCAP_ASIMDDP;
1211 
1212 	if (ID_AA64ISAR0_SM4_VAL(user_cpu_desc.id_aa64isar0) ==
1213 	    ID_AA64ISAR0_SM4_IMPL)
1214 		hwcap |= HWCAP_SM4;
1215 
1216 	if (ID_AA64ISAR0_SM3_VAL(user_cpu_desc.id_aa64isar0) ==
1217 	    ID_AA64ISAR0_SM3_IMPL)
1218 		hwcap |= HWCAP_SM3;
1219 
1220 	if (ID_AA64ISAR0_SHA3_VAL(user_cpu_desc.id_aa64isar0) ==
1221 	    ID_AA64ISAR0_SHA3_IMPL)
1222 		hwcap |= HWCAP_SHA3;
1223 
1224 	if (ID_AA64ISAR0_RDM_VAL(user_cpu_desc.id_aa64isar0) ==
1225 	    ID_AA64ISAR0_RDM_IMPL)
1226 		hwcap |= HWCAP_ASIMDRDM;
1227 
1228 	if (ID_AA64ISAR0_Atomic_VAL(user_cpu_desc.id_aa64isar0) ==
1229 	    ID_AA64ISAR0_Atomic_IMPL)
1230 		hwcap |= HWCAP_ATOMICS;
1231 
1232 	if (ID_AA64ISAR0_CRC32_VAL(user_cpu_desc.id_aa64isar0) ==
1233 	    ID_AA64ISAR0_CRC32_BASE)
1234 		hwcap |= HWCAP_CRC32;
1235 
1236 	switch (ID_AA64ISAR0_SHA2_VAL(user_cpu_desc.id_aa64isar0)) {
1237 	case ID_AA64ISAR0_SHA2_BASE:
1238 		hwcap |= HWCAP_SHA2;
1239 		break;
1240 	case ID_AA64ISAR0_SHA2_512:
1241 		hwcap |= HWCAP_SHA2 | HWCAP_SHA512;
1242 		break;
1243 	default:
1244 		break;
1245 	}
1246 
1247 	if (ID_AA64ISAR0_SHA1_VAL(user_cpu_desc.id_aa64isar0) ==
1248 	    ID_AA64ISAR0_SHA1_BASE)
1249 		hwcap |= HWCAP_SHA1;
1250 
1251 	switch (ID_AA64ISAR0_AES_VAL(user_cpu_desc.id_aa64isar0)) {
1252 	case ID_AA64ISAR0_AES_BASE:
1253 		hwcap |= HWCAP_AES;
1254 		break;
1255 	case ID_AA64ISAR0_AES_PMULL:
1256 		hwcap |= HWCAP_PMULL | HWCAP_AES;
1257 		break;
1258 	default:
1259 		break;
1260 	}
1261 
1262 	if (ID_AA64ISAR1_SB_VAL(user_cpu_desc.id_aa64isar1) ==
1263 	    ID_AA64ISAR1_SB_IMPL)
1264 		hwcap |= HWCAP_SB;
1265 
1266 	switch (ID_AA64ISAR1_LRCPC_VAL(user_cpu_desc.id_aa64isar1)) {
1267 	case ID_AA64ISAR1_LRCPC_RCPC_8_3:
1268 		hwcap |= HWCAP_LRCPC;
1269 		break;
1270 	case ID_AA64ISAR1_LRCPC_RCPC_8_4:
1271 		hwcap |= HWCAP_LRCPC | HWCAP_ILRCPC;
1272 		break;
1273 	default:
1274 		break;
1275 	}
1276 
1277 	if (ID_AA64ISAR1_FCMA_VAL(user_cpu_desc.id_aa64isar1) ==
1278 	    ID_AA64ISAR1_FCMA_IMPL)
1279 		hwcap |= HWCAP_FCMA;
1280 
1281 	if (ID_AA64ISAR1_JSCVT_VAL(user_cpu_desc.id_aa64isar1) ==
1282 	    ID_AA64ISAR1_JSCVT_IMPL)
1283 		hwcap |= HWCAP_JSCVT;
1284 
1285 	if (ID_AA64ISAR1_DPB_VAL(user_cpu_desc.id_aa64isar1) ==
1286 	    ID_AA64ISAR1_DPB_DCCVAP)
1287 		hwcap |= HWCAP_DCPOP;
1288 
1289 	if (ID_AA64PFR0_SVE_VAL(user_cpu_desc.id_aa64pfr0) ==
1290 	    ID_AA64PFR0_SVE_IMPL)
1291 		hwcap |= HWCAP_SVE;
1292 
1293 	switch (ID_AA64PFR0_AdvSIMD_VAL(user_cpu_desc.id_aa64pfr0)) {
1294 	case ID_AA64PFR0_AdvSIMD_IMPL:
1295 		hwcap |= HWCAP_ASIMD;
1296 		break;
1297 	case ID_AA64PFR0_AdvSIMD_HP:
1298 		hwcap |= HWCAP_ASIMD | HWCAP_ASIMDHP;
1299 		break;
1300 	default:
1301 		break;
1302 	}
1303 
1304 	switch (ID_AA64PFR0_FP_VAL(user_cpu_desc.id_aa64pfr0)) {
1305 	case ID_AA64PFR0_FP_IMPL:
1306 		hwcap |= HWCAP_FP;
1307 		break;
1308 	case ID_AA64PFR0_FP_HP:
1309 		hwcap |= HWCAP_FP | HWCAP_FPHP;
1310 		break;
1311 	default:
1312 		break;
1313 	}
1314 
1315 	if (ID_AA64PFR1_SSBS_VAL(user_cpu_desc.id_aa64pfr1) ==
1316 	    ID_AA64PFR1_SSBS_PSTATE_MSR)
1317 		hwcap |= HWCAP_SSBS;
1318 
1319 	return (hwcap);
1320 }
1321 
1322 static u_long
1323 parse_cpu_features_hwcap2(void)
1324 {
1325 	u_long hwcap2 = 0;
1326 
1327 	if (ID_AA64ISAR0_RNDR_VAL(user_cpu_desc.id_aa64isar0) ==
1328 	    ID_AA64ISAR0_RNDR_IMPL)
1329 		hwcap2 |= HWCAP2_RNG;
1330 
1331 	if (ID_AA64ISAR0_TS_VAL(user_cpu_desc.id_aa64isar0) ==
1332 	    ID_AA64ISAR0_TS_CondM_8_5)
1333 		hwcap2 |= HWCAP2_FLAGM2;
1334 
1335 	if (ID_AA64ISAR1_I8MM_VAL(user_cpu_desc.id_aa64isar1) ==
1336 	    ID_AA64ISAR1_I8MM_IMPL)
1337 		hwcap2 |= HWCAP2_I8MM;
1338 
1339 	if (ID_AA64ISAR1_DGH_VAL(user_cpu_desc.id_aa64isar1) ==
1340 	    ID_AA64ISAR1_DGH_IMPL)
1341 		hwcap2 |= HWCAP2_DGH;
1342 
1343 	if (ID_AA64ISAR1_BF16_VAL(user_cpu_desc.id_aa64isar1) ==
1344 	    ID_AA64ISAR1_BF16_IMPL)
1345 		hwcap2 |= HWCAP2_BF16;
1346 
1347 	if (ID_AA64ISAR1_FRINTTS_VAL(user_cpu_desc.id_aa64isar1) ==
1348 	    ID_AA64ISAR1_FRINTTS_IMPL)
1349 		hwcap2 |= HWCAP2_FRINT;
1350 
1351 	if (ID_AA64ISAR1_DPB_VAL(user_cpu_desc.id_aa64isar1) ==
1352 	    ID_AA64ISAR1_DPB_DCCVADP)
1353 		hwcap2 |= HWCAP2_DCPODP;
1354 
1355 	if (ID_AA64PFR1_BT_VAL(user_cpu_desc.id_aa64pfr1) ==
1356 	    ID_AA64PFR1_BT_IMPL)
1357 		hwcap2 |= HWCAP2_BTI;
1358 
1359 	return (hwcap2);
1360 }
1361 
1362 static void
1363 print_ctr_fields(struct sbuf *sb, uint64_t reg, void *arg)
1364 {
1365 
1366 	sbuf_printf(sb, "%u byte D-cacheline,", CTR_DLINE_SIZE(reg));
1367 	sbuf_printf(sb, "%u byte I-cacheline,", CTR_ILINE_SIZE(reg));
1368 	reg &= ~(CTR_DLINE_MASK | CTR_ILINE_MASK);
1369 
1370 	switch(CTR_L1IP_VAL(reg)) {
1371 	case CTR_L1IP_VPIPT:
1372 		sbuf_printf(sb, "VPIPT");
1373 		break;
1374 	case CTR_L1IP_AIVIVT:
1375 		sbuf_printf(sb, "AIVIVT");
1376 		break;
1377 	case CTR_L1IP_VIPT:
1378 		sbuf_printf(sb, "VIPT");
1379 		break;
1380 	case CTR_L1IP_PIPT:
1381 		sbuf_printf(sb, "PIPT");
1382 		break;
1383 	}
1384 	sbuf_printf(sb, " ICache,");
1385 	reg &= ~CTR_L1IP_MASK;
1386 
1387 	sbuf_printf(sb, "%d byte ERG,", CTR_ERG_SIZE(reg));
1388 	sbuf_printf(sb, "%d byte CWG", CTR_CWG_SIZE(reg));
1389 	reg &= ~(CTR_ERG_MASK | CTR_CWG_MASK);
1390 
1391 	if (CTR_IDC_VAL(reg) != 0)
1392 		sbuf_printf(sb, ",IDC");
1393 	if (CTR_DIC_VAL(reg) != 0)
1394 		sbuf_printf(sb, ",DIC");
1395 	reg &= ~(CTR_IDC_MASK | CTR_DIC_MASK);
1396 	reg &= ~CTR_RES1;
1397 
1398 	if (reg != 0)
1399 		sbuf_printf(sb, ",%lx", reg);
1400 }
1401 
1402 static void
1403 print_register(struct sbuf *sb, const char *reg_name, uint64_t reg,
1404     void (*print_fields)(struct sbuf *, uint64_t, void *), void *arg)
1405 {
1406 
1407 	sbuf_printf(sb, "%29s = <", reg_name);
1408 
1409 	print_fields(sb, reg, arg);
1410 
1411 	sbuf_finish(sb);
1412 	printf("%s>\n", sbuf_data(sb));
1413 	sbuf_clear(sb);
1414 }
1415 
1416 static void
1417 print_id_fields(struct sbuf *sb, uint64_t reg, void *arg)
1418 {
1419 	struct mrs_field *fields = arg;
1420 	struct mrs_field_value *fv;
1421 	int field, i, j, printed;
1422 
1423 #define SEP_STR	((printed++) == 0) ? "" : ","
1424 	printed = 0;
1425 	for (i = 0; fields[i].type != 0; i++) {
1426 		fv = fields[i].values;
1427 
1428 		/* TODO: Handle with an unknown message */
1429 		if (fv == NULL)
1430 			continue;
1431 
1432 		field = (reg & fields[i].mask) >> fields[i].shift;
1433 		for (j = 0; fv[j].desc != NULL; j++) {
1434 			if ((fv[j].value >> fields[i].shift) != field)
1435 				continue;
1436 
1437 			if (fv[j].desc[0] != '\0')
1438 				sbuf_printf(sb, "%s%s", SEP_STR, fv[j].desc);
1439 			break;
1440 		}
1441 		if (fv[j].desc == NULL)
1442 			sbuf_printf(sb, "%sUnknown %s(%x)", SEP_STR,
1443 			    fields[i].name, field);
1444 
1445 		reg &= ~(0xful << fields[i].shift);
1446 	}
1447 
1448 	if (reg != 0)
1449 		sbuf_printf(sb, "%s%#lx", SEP_STR, reg);
1450 #undef SEP_STR
1451 }
1452 
1453 static void
1454 print_id_register(struct sbuf *sb, const char *reg_name, uint64_t reg,
1455     struct mrs_field *fields)
1456 {
1457 
1458 	print_register(sb, reg_name, reg, print_id_fields, fields);
1459 }
1460 
1461 static void
1462 print_cpu_features(u_int cpu)
1463 {
1464 	struct sbuf *sb;
1465 
1466 	sb = sbuf_new_auto();
1467 	sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
1468 	    cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
1469 	    cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
1470 
1471 	sbuf_cat(sb, " affinity:");
1472 	switch(cpu_aff_levels) {
1473 	default:
1474 	case 4:
1475 		sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
1476 		/* FALLTHROUGH */
1477 	case 3:
1478 		sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
1479 		/* FALLTHROUGH */
1480 	case 2:
1481 		sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
1482 		/* FALLTHROUGH */
1483 	case 1:
1484 	case 0: /* On UP this will be zero */
1485 		sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
1486 		break;
1487 	}
1488 	sbuf_finish(sb);
1489 	printf("%s\n", sbuf_data(sb));
1490 	sbuf_clear(sb);
1491 
1492 	/*
1493 	 * There is a hardware erratum where, if one CPU is performing a TLB
1494 	 * invalidation while another is performing a store-exclusive, the
1495 	 * store-exclusive may return the wrong status. A workaround seems
1496 	 * to be to use an IPI to invalidate on each CPU; however, given the
1497 	 * limited number of affected units (pass 1.1 is the evaluation
1498 	 * hardware revision) and the lack of information from Cavium,
1499 	 * this has not been implemented.
1500 	 *
1501 	 * At the time of writing, the only information is from:
1502 	 * https://lkml.org/lkml/2016/8/4/722
1503 	 */
1504 	/*
1505 	 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
1506 	 * triggers on pass 2.0+.
1507 	 */
1508 	if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
1509 	    CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
1510 		printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
1511 		    "hardware bugs that may cause the incorrect operation of "
1512 		    "atomic operations.\n");
1513 
1514 	/* Cache Type Register */
1515 	if (cpu == 0 || (cpu_print_regs & PRINT_CTR_EL0) != 0) {
1516 		print_register(sb, "Cache Type",
1517 		    cpu_desc[cpu].ctr, print_ctr_fields, NULL);
1518 	}
1519 
1520 	/* AArch64 Instruction Set Attribute Register 0 */
1521 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0)
1522 		print_id_register(sb, "Instruction Set Attributes 0",
1523 		    cpu_desc[cpu].id_aa64isar0, id_aa64isar0_fields);
1524 
1525 	/* AArch64 Instruction Set Attribute Register 1 */
1526 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0)
1527 		print_id_register(sb, "Instruction Set Attributes 1",
1528 		    cpu_desc[cpu].id_aa64isar1, id_aa64isar1_fields);
1529 
1530 	/* AArch64 Processor Feature Register 0 */
1531 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0)
1532 		print_id_register(sb, "Processor Features 0",
1533 		    cpu_desc[cpu].id_aa64pfr0, id_aa64pfr0_fields);
1534 
1535 	/* AArch64 Processor Feature Register 1 */
1536 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0)
1537 		print_id_register(sb, "Processor Features 1",
1538 		    cpu_desc[cpu].id_aa64pfr1, id_aa64pfr1_fields);
1539 
1540 	/* AArch64 Memory Model Feature Register 0 */
1541 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0)
1542 		print_id_register(sb, "Memory Model Features 0",
1543 		    cpu_desc[cpu].id_aa64mmfr0, id_aa64mmfr0_fields);
1544 
1545 	/* AArch64 Memory Model Feature Register 1 */
1546 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0)
1547 		print_id_register(sb, "Memory Model Features 1",
1548 		    cpu_desc[cpu].id_aa64mmfr1, id_aa64mmfr1_fields);
1549 
1550 	/* AArch64 Memory Model Feature Register 2 */
1551 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0)
1552 		print_id_register(sb, "Memory Model Features 2",
1553 		    cpu_desc[cpu].id_aa64mmfr2, id_aa64mmfr2_fields);
1554 
1555 	/* AArch64 Debug Feature Register 0 */
1556 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0)
1557 		print_id_register(sb, "Debug Features 0",
1558 		    cpu_desc[cpu].id_aa64dfr0, id_aa64dfr0_fields);
1559 
1560 	/* AArch64 Debug Feature Register 1 */
1561 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0)
1562 		print_id_register(sb, "Debug Features 1",
1563 		    cpu_desc[cpu].id_aa64dfr1, id_aa64dfr1_fields);
1564 
1565 	/* AArch64 Auxiliary Feature Register 0 */
1566 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0)
1567 		print_id_register(sb, "Auxiliary Features 0",
1568 		    cpu_desc[cpu].id_aa64afr0, id_aa64afr0_fields);
1569 
1570 	/* AArch64 Auxiliary Feature Register 1 */
1571 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0)
1572 		print_id_register(sb, "Auxiliary Features 1",
1573 		    cpu_desc[cpu].id_aa64afr1, id_aa64afr1_fields);
1574 
1575 	sbuf_delete(sb);
1576 	sb = NULL;
1578 }
1579 
1580 void
1581 identify_cache(uint64_t ctr)
1582 {
1583 
1584 	/* Identify the L1 cache type */
1585 	switch (CTR_L1IP_VAL(ctr)) {
1586 	case CTR_L1IP_PIPT:
1587 		break;
1588 	case CTR_L1IP_VPIPT:
1589 		icache_vmid = true;
1590 		break;
1591 	default:
1592 	case CTR_L1IP_VIPT:
1593 		icache_aliasing = true;
1594 		break;
1595 	}
1596 
1597 	if (dcache_line_size == 0) {
1598 		KASSERT(icache_line_size == 0, ("%s: i-cacheline size set: %ld",
1599 		    __func__, icache_line_size));
1600 
1601 		/* Get the D cache line size */
1602 		dcache_line_size = CTR_DLINE_SIZE(ctr);
1603 		/* And the same for the I cache */
1604 		icache_line_size = CTR_ILINE_SIZE(ctr);
1605 
1606 		idcache_line_size = MIN(dcache_line_size, icache_line_size);
1607 	}
1608 
1609 	if (dcache_line_size != CTR_DLINE_SIZE(ctr)) {
1610 		printf("WARNING: D-cacheline size mismatch %ld != %d\n",
1611 		    dcache_line_size, CTR_DLINE_SIZE(ctr));
1612 	}
1613 
1614 	if (icache_line_size != CTR_ILINE_SIZE(ctr)) {
1615 		printf("WARNING: I-cacheline size mismatch %ld != %d\n",
1616 		    icache_line_size, CTR_ILINE_SIZE(ctr));
1617 	}
1618 }
1619 
1620 void
1621 identify_cpu(u_int cpu)
1622 {
1623 	u_int midr;
1624 	u_int impl_id;
1625 	u_int part_id;
1626 	size_t i;
1627 	const struct cpu_parts *cpu_partsp = NULL;
1628 
1629 	midr = get_midr();
1630 
1631 	impl_id = CPU_IMPL(midr);
1632 	for (i = 0; i < nitems(cpu_implementers); i++) {
1633 		if (impl_id == cpu_implementers[i].impl_id ||
1634 		    cpu_implementers[i].impl_id == 0) {
1635 			cpu_desc[cpu].cpu_impl = impl_id;
1636 			cpu_desc[cpu].cpu_impl_name =
1637 			    cpu_implementers[i].impl_name;
1638 			cpu_partsp = cpu_implementers[i].cpu_parts;
1639 			break;
1640 		}
1641 	}
1642 
1643 	part_id = CPU_PART(midr);
1644 	for (i = 0; ; i++) {	/* terminated by the CPU_PART_NONE sentinel */
1645 		if (part_id == cpu_partsp[i].part_id ||
1646 		    cpu_partsp[i].part_id == 0) {
1647 			cpu_desc[cpu].cpu_part_num = part_id;
1648 			cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
1649 			break;
1650 		}
1651 	}
1652 
1653 	cpu_desc[cpu].cpu_revision = CPU_REV(midr);
1654 	cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
1655 
1656 	snprintf(cpu_model, sizeof(cpu_model), "%s %s r%dp%d",
1657 	    cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
1658 	    cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
1659 
1660 	/* Save affinity for current CPU */
1661 	cpu_desc[cpu].mpidr = get_mpidr();
1662 	CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;
1663 
1664 	cpu_desc[cpu].ctr = READ_SPECIALREG(ctr_el0);
1665 	cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
1666 	cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1);
1667 	cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
1668 	cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1);
1669 	cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
1670 	cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
1671 	cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(id_aa64mmfr2_el1);
1672 	cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
1673 	cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1);
1674 }
1675 
1676 static void
1677 check_cpu_regs(u_int cpu)
1678 {
1679 
1680 	switch (cpu_aff_levels) {
1681 	case 0:
1682 		if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
1683 		    CPU_AFF0(cpu_desc[0].mpidr))
1684 			cpu_aff_levels = 1;
1685 		/* FALLTHROUGH */
1686 	case 1:
1687 		if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
1688 		    CPU_AFF1(cpu_desc[0].mpidr))
1689 			cpu_aff_levels = 2;
1690 		/* FALLTHROUGH */
1691 	case 2:
1692 		if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
1693 		    CPU_AFF2(cpu_desc[0].mpidr))
1694 			cpu_aff_levels = 3;
1695 		/* FALLTHROUGH */
1696 	case 3:
1697 		if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
1698 		    CPU_AFF3(cpu_desc[0].mpidr))
1699 			cpu_aff_levels = 4;
1700 		break;
1701 	}
1702 
1703 	if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
1704 		cpu_print_regs |= PRINT_ID_AA64_AFR0;
1705 	if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
1706 		cpu_print_regs |= PRINT_ID_AA64_AFR1;
1707 
1708 	if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
1709 		cpu_print_regs |= PRINT_ID_AA64_DFR0;
1710 	if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
1711 		cpu_print_regs |= PRINT_ID_AA64_DFR1;
1712 
1713 	if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
1714 		cpu_print_regs |= PRINT_ID_AA64_ISAR0;
1715 	if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
1716 		cpu_print_regs |= PRINT_ID_AA64_ISAR1;
1717 
1718 	if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
1719 		cpu_print_regs |= PRINT_ID_AA64_MMFR0;
1720 	if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
1721 		cpu_print_regs |= PRINT_ID_AA64_MMFR1;
1722 	if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
1723 		cpu_print_regs |= PRINT_ID_AA64_MMFR2;
1724 
1725 	if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
1726 		cpu_print_regs |= PRINT_ID_AA64_PFR0;
1727 	if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
1728 		cpu_print_regs |= PRINT_ID_AA64_PFR1;
1729 
1730 	if (cpu_desc[cpu].ctr != cpu_desc[0].ctr) {
1731 		/*
1732 		 * If the cache type register is different we may
1733 		 * have a different l1 cache type.
1734 		 */
1735 		identify_cache(cpu_desc[cpu].ctr);
1736 		cpu_print_regs |= PRINT_CTR_EL0;
1737 	}
1738 }
1739