xref: /freebsd/sys/arm/arm/cpuinfo.c (revision 32c48d07)
/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/cpuinfo.h>
#include <machine/elf.h>
#include <machine/md_var.h>

#if __ARM_ARCH >= 6
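/*
 * reinit_mmu() is implemented outside this file; it is expected to switch
 * to the final kernel translation table and, roughly, to rewrite ACTLR as
 * (ACTLR & ~aux_clr) | aux_set (see cpuinfo_reinit_mmu() below).
 */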
void reinit_mmu(uint32_t ttb, uint32_t aux_clr, uint32_t aux_set);
#endif

struct cpuinfo cpuinfo =
{
	/* Use safe defaults for start */
	.dcache_line_size = 32,
	.dcache_line_mask = 31,
	.icache_line_size = 32,
	.icache_line_mask = 31,
};

static SYSCTL_NODE(_hw, OID_AUTO, cpu, CTLFLAG_RD, 0,
    "CPU");
static SYSCTL_NODE(_hw_cpu, OID_AUTO, quirks, CTLFLAG_RD, 0,
    "CPU quirks");

/*
 * Tunable CPU quirks.
 * Be careful: ACTLR can only be changed if the CPU was started in secure
 * mode (world); otherwise a write to ACTLR can cause an exception!
 * These quirks are intended for optimizing CPU performance, not for
 * applying errata workarounds. A CPU with unfixed errata cannot be
 * expected to run the kernel reliably until the quirks are applied.
 */
static uint32_t cpu_quirks_actlr_mask;
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_mask,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_mask, 0,
    "Bits to be masked in ACTLR");

static uint32_t cpu_quirks_actlr_set;
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_set,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_set, 0,
    "Bits to be set in ACTLR");
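/*
 * Example (hypothetical values): to clear ACTLR bit 1 and set ACTLR bit 0
 * on the boot CPU, the tunables could be set from loader.conf:
 *
 *   hw.cpu.quirks.actlr_mask="0x2"
 *   hw.cpu.quirks.actlr_set="0x1"
 *
 * The mask names the bits to be cleared before the set bits are ORed in
 * (see cpuinfo_reinit_mmu() below).
 */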

/* Read and parse the CPU ID scheme. */
void
cpuinfo_init(void)
{
#if __ARM_ARCH >= 6
	uint32_t tmp;
#endif

	/*
	 * Fetch the CPU quirk tunables early. The standard fetch for
	 * tunable sysctls is handled via SYSINIT, which is too late for
	 * the boot CPU. Keep the names in sync with the sysctls above.
	 */
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_mask", &cpu_quirks_actlr_mask);
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_set", &cpu_quirks_actlr_set);

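	/*
	 * MIDR layout (new ID scheme): [31:24] implementer, [23:20] variant,
	 * [19:16] architecture, [15:4] primary part number, [3:0] revision.
	 */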
	cpuinfo.midr = cp15_midr_get();
	/* Test the old ID schemes first. */
	if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
		if (CPU_ID_ISOLD(cpuinfo.midr)) {
			/* obsolete ARMv2 or ARMv3 CPU */
			cpuinfo.midr = 0;
			return;
		}
		if (CPU_ID_IS7(cpuinfo.midr)) {
			if ((cpuinfo.midr & (1 << 23)) == 0) {
				/* obsolete ARMv3 CPU */
				cpuinfo.midr = 0;
				return;
			}
			/* ARMv4T CPU */
			cpuinfo.architecture = 1;
			cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
		} else {
			/* ARM, new ID scheme */
			cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
			cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
		}
	} else {
		/* non-ARM implementer -> must use the new ID scheme */
		cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
		cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
	}
	/* Parse the rest of MIDR. */
	cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
	cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
	cpuinfo.patch = cpuinfo.midr & 0x0F;

	/* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */
	cpuinfo.ctr = cp15_ctr_get();
	cpuinfo.tcmtr = cp15_tcmtr_get();
#if __ARM_ARCH >= 6
	cpuinfo.tlbtr = cp15_tlbtr_get();
	cpuinfo.mpidr = cp15_mpidr_get();
	cpuinfo.revidr = cp15_revidr_get();
#endif

	/* Done if the CPU does not use the ARMv7 ID scheme. */
	if (cpuinfo.architecture != 0xF)
		return;
#if __ARM_ARCH >= 6
	cpuinfo.id_pfr0 = cp15_id_pfr0_get();
	cpuinfo.id_pfr1 = cp15_id_pfr1_get();
	cpuinfo.id_dfr0 = cp15_id_dfr0_get();
	cpuinfo.id_afr0 = cp15_id_afr0_get();
	cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
	cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
	cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
	cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
	cpuinfo.id_isar0 = cp15_id_isar0_get();
	cpuinfo.id_isar1 = cp15_id_isar1_get();
	cpuinfo.id_isar2 = cp15_id_isar2_get();
	cpuinfo.id_isar3 = cp15_id_isar3_get();
	cpuinfo.id_isar4 = cp15_id_isar4_get();
	cpuinfo.id_isar5 = cp15_id_isar5_get();

/* Not yet - CBAR only exists on ARM SMP Cortex-A CPUs
	cpuinfo.cbar = cp15_cbar_get();
*/
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.ccsidr = cp15_ccsidr_get();
		cpuinfo.clidr = cp15_clidr_get();
	}

	/* Test whether REVIDR is implemented. */
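	/* (an unimplemented REVIDR reads as a copy of MIDR) */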
	if (cpuinfo.revidr == cpuinfo.midr)
		cpuinfo.revidr = 0;

	/* parsed bits of above registers */
	/* id_mmfr0 */
	cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF;
	cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
	cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
	cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
	/* id_mmfr2 */
	cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
	/* id_mmfr3 */
	cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
	cpuinfo.maintenance_broadcast = (cpuinfo.id_mmfr3 >> 12) & 0xF;
	/* id_pfr1 */
	cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
	cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
	cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;
	/* mpidr */
	cpuinfo.mp_ext = (cpuinfo.mpidr >> 31u) & 0x1;

	/* L1 Cache sizes */
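	/*
	 * In the ARMv7 CTR format, IminLine/DminLine hold log2 of the line
	 * length in words, so the line size in bytes is 4 << value,
	 * i.e. 1 << (value + 2).
	 */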
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2);
	} else {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3);
	}
	cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
	cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;

	/* Fill AT_HWCAP bits. */
	elf_hwcap |= HWCAP_HALF | HWCAP_FAST_MULT; /* Required for all CPUs */
	elf_hwcap |= HWCAP_TLS | HWCAP_EDSP;	   /* Required for v6+ CPUs */

	tmp = (cpuinfo.id_isar0 >> 24) & 0xF;	/* Divide_instrs */
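	/* 1 = SDIV/UDIV in Thumb only, 2 = SDIV/UDIV in both ARM and Thumb */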
	if (tmp >= 1)
		elf_hwcap |= HWCAP_IDIVT;
	if (tmp >= 2)
		elf_hwcap |= HWCAP_IDIVA;

	tmp = (cpuinfo.id_pfr0 >> 4) & 0xF;	/* State1 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMB;

	tmp = (cpuinfo.id_pfr0 >> 12) & 0xF;	/* State3 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMBEE;

	tmp = (cpuinfo.id_mmfr0 >> 0) & 0xF;	/* VMSA */
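	/* VMSA level 5 adds the long-descriptor format (LPAE) */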
	if (tmp >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* Fill AT_HWCAP2 bits. */
	tmp = (cpuinfo.id_isar5 >> 4) & 0xF;	/* AES */
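	/* 1 = AESE/AESD/AESMC/AESIMC, 2 = additionally VMULL.P64 (PMULL) */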
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_AES;
	if (tmp >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;

	tmp = (cpuinfo.id_isar5 >> 8) & 0xF;	/* SHA1 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	tmp = (cpuinfo.id_isar5 >> 12) & 0xF;	/* SHA2 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	tmp = (cpuinfo.id_isar5 >> 16) & 0xF;	/* CRC32 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
#endif
}

#if __ARM_ARCH >= 6
/*
 * Get bits that must be set or cleared in the ACTLR register.
 * Note: The bits in the ACTLR register are IMPLEMENTATION DEFINED.
 * It is expected that the SCU is in an operational state before this
 * function is called.
 */
static void
cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
{

	*actlr_mask = 0;
	*actlr_set = 0;

	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
		switch (cpuinfo.part_number) {
		case CPU_ARCH_CORTEX_A73:
		case CPU_ARCH_CORTEX_A72:
		case CPU_ARCH_CORTEX_A57:
		case CPU_ARCH_CORTEX_A53:
			/* Nothing to do for AArch32 */
			break;
		case CPU_ARCH_CORTEX_A17:
		case CPU_ARCH_CORTEX_A12: /* A12 is merged into A17 */
			/*
			 * Enable SMP mode
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A15:
			/*
			 * Enable snoop-delayed exclusive handling
			 * Enable SMP mode
			 */
			*actlr_mask = (1U << 31) | (1 << 6);
			*actlr_set = (1U << 31) | (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A9:
			/*
			 * Disable exclusive L1/L2 cache control
			 * Enable SMP mode
			 * Enable Cache and TLB maintenance broadcast
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_CORTEX_A8:
			/*
			 * Enable L2 cache
			 * Enable L1 data cache hardware alias checks
			 */
			*actlr_mask = (1 << 1) | (1 << 0);
			*actlr_set = (1 << 1);
			break;
		case CPU_ARCH_CORTEX_A7:
			/*
			 * Enable SMP mode
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A5:
			/*
			 * Disable exclusive L1/L2 cache control
			 * Enable SMP mode
			 * Enable Cache and TLB maintenance broadcast
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_ARM1176:
			/*
			 * Restrict cache size to 16KB
			 * Enable the return stack
			 * Enable dynamic branch prediction
			 * Enable static branch prediction
			 */
			*actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			break;
		}
		return;
	}
}

/* Reinitialize MMU to final kernel mapping and apply all CPU quirks. */
void
cpuinfo_reinit_mmu(uint32_t ttb)
{
	uint32_t actlr_mask;
	uint32_t actlr_set;

	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
	actlr_mask |= cpu_quirks_actlr_mask;
	actlr_set |= cpu_quirks_actlr_set;
	reinit_mmu(ttb, actlr_mask, actlr_set);
}

#endif /* __ARM_ARCH >= 6 */