xref: /linux/arch/arm64/kernel/cpuinfo.c (revision f86fd32d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Record and handle CPU attributes.
 *
 * Copyright (C) 2014 ARM Ltd.
 */
#include <asm/arch_timer.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/fpsimd.h>

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/personality.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>

/*
 * In case the boot CPU is hotpluggable, we record its initial state and
 * current state separately. Certain system registers may contain different
 * values depending on configuration at or after reset.
 */
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;

static const char *icache_policy_str[] = {
	[0 ... ICACHE_POLICY_PIPT]	= "RESERVED/UNKNOWN",
	[ICACHE_POLICY_VIPT]		= "VIPT",
	[ICACHE_POLICY_PIPT]		= "PIPT",
	[ICACHE_POLICY_VPIPT]		= "VPIPT",
};

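/*
 * Global I-cache attribute flags (e.g. ICACHEF_ALIASING, ICACHEF_VPIPT),
 * set from cpuinfo_detect_icache_policy() below.
 */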
unsigned long __icache_flags;

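/*
 * Userspace-visible names for the HWCAP_* bits, indexed by bit number.
 * c_show() prints hwcap_str[j] when cpu_have_feature(j) is set, so the
 * order here must match the HWCAP_* bit assignments.
 */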
static const char *const hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"atomics",
	"fphp",
	"asimdhp",
	"cpuid",
	"asimdrdm",
	"jscvt",
	"fcma",
	"lrcpc",
	"dcpop",
	"sha3",
	"sm3",
	"sm4",
	"asimddp",
	"sha512",
	"sve",
	"asimdfhm",
	"dit",
	"uscat",
	"ilrcpc",
	"flagm",
	"ssbs",
	"sb",
	"paca",
	"pacg",
	"dcpodp",
	"sve2",
	"sveaes",
	"svepmull",
	"svebitperm",
	"svesha3",
	"svesm4",
	"flagm2",
	"frint",
	"svei8mm",
	"svef32mm",
	"svef64mm",
	"svebf16",
	"i8mm",
	"bf16",
	"dgh",
	"rng",
	NULL
};

#ifdef CONFIG_COMPAT
static const char *const compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *const compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */

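/*
 * Show handler for /proc/cpuinfo (wired up via cpuinfo_op below): prints one
 * record per online CPU, in a format existing userspace parsers rely on.
 */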
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	bool compat = personality(current->personality) == PER_LINUX32;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		if (compat)
			seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
				   MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);

		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000UL/HZ),
			   loops_per_jiffy / (5000UL/HZ) % 100);

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (compat) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (cpu_have_feature(j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}

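/*
 * Minimal seq_file iterator: c_show() walks all online CPUs itself, so the
 * iterator only needs to produce a single dummy record.
 */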
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};


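/*
 * kobject type for the per-CPU "regs" directory; the kobjects themselves
 * are embedded in the per-CPU cpu_data and live for the lifetime of the
 * kernel.
 */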
static struct kobj_type cpuregs_kobj_type = {
	.sysfs_ops = &kobj_sysfs_ops,
};

/*
 * The ARM ARM uses the phrase "32-bit register" to describe a register
 * whose upper 32 bits are RES0 (per C5.1.1, ARM DDI 0487A.i); however,
 * no statement is made as to whether the upper 32 bits might be used in
 * future, and between ARM DDI 0487A.c and ARM DDI 0487A.d CLIDR_EL1 was
 * expanded from 32-bit to 64-bit.
 *
 * Thus, while both MIDR_EL1 and REVIDR_EL1 are described as 32-bit
 * registers, we expose them both as 64-bit values to cater for possible
 * future expansion without an ABI break.
 */
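/*
 * The resulting attributes appear under
 * /sys/devices/system/cpu/cpuN/regs/identification/{midr_el1,revidr_el1},
 * where "regs" is the kobject added in cpuid_cpu_online() and
 * "identification" is the attribute group defined below.
 */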
#define kobj_to_cpuinfo(kobj)	container_of(kobj, struct cpuinfo_arm64, kobj)
#define CPUREGS_ATTR_RO(_name, _field)						\
	static ssize_t _name##_show(struct kobject *kobj,			\
			struct kobj_attribute *attr, char *buf)			\
	{									\
		struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj);		\
										\
		if (info->reg_midr)						\
			return sprintf(buf, "0x%016x\n", info->reg_##_field);	\
		else								\
			return 0;						\
	}									\
	static struct kobj_attribute cpuregs_attr_##_name = __ATTR_RO(_name)

CPUREGS_ATTR_RO(midr_el1, midr);
CPUREGS_ATTR_RO(revidr_el1, revidr);

static struct attribute *cpuregs_id_attrs[] = {
	&cpuregs_attr_midr_el1.attr,
	&cpuregs_attr_revidr_el1.attr,
	NULL
};

static const struct attribute_group cpuregs_attr_group = {
	.attrs = cpuregs_id_attrs,
	.name = "identification"
};

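/* CPU hotplug online callback: expose this CPU's ID registers via sysfs. */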
static int cpuid_cpu_online(unsigned int cpu)
{
	int rc;
	struct device *dev;
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

	dev = get_cpu_device(cpu);
	if (!dev) {
		rc = -ENODEV;
		goto out;
	}
	rc = kobject_add(&info->kobj, &dev->kobj, "regs");
	if (rc)
		goto out;
	rc = sysfs_create_group(&info->kobj, &cpuregs_attr_group);
	if (rc)
		kobject_del(&info->kobj);
out:
	return rc;
}

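/* CPU hotplug offline callback: tear down the sysfs entries added above. */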
static int cpuid_cpu_offline(unsigned int cpu)
{
	struct device *dev;
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

	dev = get_cpu_device(cpu);
	if (!dev)
		return -ENODEV;
	if (info->kobj.parent) {
		sysfs_remove_group(&info->kobj, &cpuregs_attr_group);
		kobject_del(&info->kobj);
	}

	return 0;
}

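/*
 * Initialise the per-CPU kobjects and register the hotplug callbacks above;
 * run from the device_initcall() at the bottom of this file.
 */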
static int __init cpuinfo_regs_init(void)
{
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

		kobject_init(&info->kobj, &cpuregs_kobj_type);
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/cpuinfo:online",
				cpuid_cpu_online, cpuid_cpu_offline);
	if (ret < 0) {
		pr_err("cpuinfo: failed to register hotplug callbacks.\n");
		return ret;
	}
	return 0;
}
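
/*
 * Decode CTR_EL0.L1Ip and record the I-cache policy in __icache_flags.
 * Anything other than PIPT or VPIPT is conservatively treated as aliasing.
 */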
static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
{
	unsigned int cpu = smp_processor_id();
	u32 l1ip = CTR_L1IP(info->reg_ctr);

	switch (l1ip) {
	case ICACHE_POLICY_PIPT:
		break;
	case ICACHE_POLICY_VPIPT:
		set_bit(ICACHEF_VPIPT, &__icache_flags);
		break;
	default:
		/* Fallthrough */
	case ICACHE_POLICY_VIPT:
		/* Assume aliasing */
		set_bit(ICACHEF_ALIASING, &__icache_flags);
	}

	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}

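/*
 * Snapshot the ID registers of the calling CPU into @info. This must run on
 * the CPU being described, since it reads CPU-local system registers.
 */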
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
	info->reg_cntfrq = arch_timer_get_cntfrq();
	/*
	 * Use the effective value of CTR_EL0 rather than the raw value
	 * exposed by the CPU. The CTR_EL0.IDC field must be interpreted
	 * together with the CLIDR_EL1 fields to avoid triggering false
	 * warnings when there is a mismatch across CPUs. Keep track of
	 * the effective value of CTR_EL0 in our internal records for
	 * accurate sanity checks and feature enablement.
	 */
	info->reg_ctr = read_cpuid_effective_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
	info->reg_midr = read_cpuid_id();
	info->reg_revidr = read_cpuid(REVIDR_EL1);

	info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
	info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);

	/* Update the 32-bit ID registers only if AArch32 is implemented */
	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
		info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
		info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
		info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
		info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
		info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
		info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
		info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
		info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
		info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
		info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
		info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
		info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
		info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);

		info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
		info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
		info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
	}

	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
	    id_aa64pfr0_sve(info->reg_id_aa64pfr0))
		info->reg_zcr = read_zcr_features();

	cpuinfo_detect_icache_policy(info);
}

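/*
 * Record the calling CPU's state and cross-check it against the boot CPU
 * via update_cpu_features(), which sanity-checks ID register mismatches
 * between CPUs.
 */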
void cpuinfo_store_cpu(void)
{
	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
	__cpuinfo_store_cpu(info);
	update_cpu_features(smp_processor_id(), info, &boot_cpu_data);
}

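/*
 * Record the boot CPU's state and use it to seed both boot_cpu_data and the
 * system-wide feature state via init_cpu_features().
 */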
void __init cpuinfo_store_boot_cpu(void)
{
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
	__cpuinfo_store_cpu(info);

	boot_cpu_data = *info;
	init_cpu_features(&boot_cpu_data);
}

device_initcall(cpuinfo_regs_init);
408