xref: /freebsd/sys/arm/arm/cpuinfo.c (revision 7bf5720a)
1 /*-
2  * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
3  * Copyright 2014 Michal Meloun <meloun@miracle.cz>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/sysctl.h>
34 
35 #include <machine/cpu.h>
36 #include <machine/cpuinfo.h>
37 
38 #if __ARM_ARCH >= 6
39 void reinit_mmu(uint32_t ttb, uint32_t aux_clr, uint32_t aux_set);
40 #endif
41 
struct cpuinfo cpuinfo =
{
	/*
	 * Use safe defaults for start: 32-byte cache lines, so cache
	 * maintenance code works correctly even before cpuinfo_init()
	 * reads the real geometry from the cache type register.
	 */
	.dcache_line_size = 32,
	.dcache_line_mask = 31,		/* dcache_line_size - 1 */
	.icache_line_size = 32,
	.icache_line_mask = 31,		/* icache_line_size - 1 */
};
50 
/* Read-only sysctl trees: hw.cpu and hw.cpu.quirks. */
static SYSCTL_NODE(_hw, OID_AUTO, cpu, CTLFLAG_RD, 0,
    "CPU");
static SYSCTL_NODE(_hw_cpu, OID_AUTO, quirks, CTLFLAG_RD, 0,
    "CPU quirks");
55 
56 /*
57  * Tunable CPU quirks.
58  * Be careful, ACTRL cannot be changed if CPU is started in secure
59  * mode(world) and write to ACTRL can cause exception!
60  * These quirks are intended for optimizing CPU performance, not for
61  * applying errata workarounds. Nobody can expect that CPU with unfixed
62  * errata is stable enough to execute the kernel until quirks are applied.
63  */
64 static uint32_t cpu_quirks_actlr_mask;
65 SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_mask,
66     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_mask, 0,
67     "Bits to be masked in ACTLR");
68 
69 static uint32_t cpu_quirks_actlr_set;
70 SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_set,
71     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_set, 0,
72     "Bits to be set in ACTLR");
73 
74 
75 /* Read and parse CPU id scheme */
/*
 * Read and parse the CPU id scheme.
 *
 * Fills in the global 'cpuinfo' from the CP15 identification registers.
 * Obsolete (pre-ARMv4) CPUs are rejected by zeroing cpuinfo.midr.
 * The detailed feature and cache-geometry fields are populated only for
 * CPUs that use the "new" (CPUID/ARMv7) id scheme, i.e. when the
 * architecture field of MIDR reads 0xF.
 */
void
cpuinfo_init(void)
{

	cpuinfo.midr = cp15_midr_get();
	/* Test old version id schemes first */
	if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
		if (CPU_ID_ISOLD(cpuinfo.midr)) {
			/* obsolete ARMv2 or ARMv3 CPU */
			cpuinfo.midr = 0;
			return;
		}
		if (CPU_ID_IS7(cpuinfo.midr)) {
			/* Bit 23 distinguishes ARMv4T from older ARMv3. */
			if ((cpuinfo.midr & (1 << 23)) == 0) {
				/* obsolete ARMv3 CPU */
				cpuinfo.midr = 0;
				return;
			}
			/* ARMv4T CPU */
			cpuinfo.architecture = 1;
			cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
		} else {
			/* ARM new id scheme: MIDR[19:16] arch, [23:20] variant */
			cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
			cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
		}
	} else {
		/* non ARM -> must be new id scheme */
		cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
		cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
	}
	/* Parse rest of MIDR: [31:24] implementer, [15:4] part, [3:0] rev */
	cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
	cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
	cpuinfo.patch = cpuinfo.midr & 0x0F;

	/* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */
	cpuinfo.ctr = cp15_ctr_get();
	cpuinfo.tcmtr = cp15_tcmtr_get();
#if __ARM_ARCH >= 6
	cpuinfo.tlbtr = cp15_tlbtr_get();
	cpuinfo.mpidr = cp15_mpidr_get();
	cpuinfo.revidr = cp15_revidr_get();
#endif

	/* if CPU is not v7 cpu id scheme */
	if (cpuinfo.architecture != 0xF)
		return;
#if __ARM_ARCH >= 6
	/* ARMv7-style id registers: processor/debug/memory/ISA features. */
	cpuinfo.id_pfr0 = cp15_id_pfr0_get();
	cpuinfo.id_pfr1 = cp15_id_pfr1_get();
	cpuinfo.id_dfr0 = cp15_id_dfr0_get();
	cpuinfo.id_afr0 = cp15_id_afr0_get();
	cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
	cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
	cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
	cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
	cpuinfo.id_isar0 = cp15_id_isar0_get();
	cpuinfo.id_isar1 = cp15_id_isar1_get();
	cpuinfo.id_isar2 = cp15_id_isar2_get();
	cpuinfo.id_isar3 = cp15_id_isar3_get();
	cpuinfo.id_isar4 = cp15_id_isar4_get();
	cpuinfo.id_isar5 = cp15_id_isar5_get();

/* Not yet - CBAR only exist on ARM SMP Cortex A CPUs
	cpuinfo.cbar = cp15_cbar_get();
*/
	/* CCSIDR/CLIDR only exist with the ARMv7 cache type register. */
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.ccsidr = cp15_ccsidr_get();
		cpuinfo.clidr = cp15_clidr_get();
	}

	/*
	 * Test if revidr is implemented: an unimplemented REVIDR reads
	 * as MIDR, so in that case report it as zero.
	 */
	if (cpuinfo.revidr == cpuinfo.midr)
		cpuinfo.revidr = 0;

	/* parsed bits of above registers */
	/* id_mmfr0 */
	cpuinfo.outermost_shareability =  (cpuinfo.id_mmfr0 >> 8) & 0xF;
	cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
	cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
	cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
	/* id_mmfr2 */
	cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
	/* id_mmfr3 */
	cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
	cpuinfo.maintenance_broadcast =(cpuinfo.id_mmfr3 >> 12) & 0xF;
	/* id_pfr1 */
	cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
	cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
	cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;
	/* mpidr: bit 31 set means the multiprocessing extensions exist */
	cpuinfo.mp_ext = (cpuinfo.mpidr >> 31u) & 0x1;

	/*
	 * L1 Cache sizes: ARMv7 CTR encodes log2(words) of the minimum
	 * line, the older format encodes log2(words/2) per cache side.
	 */
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2);
	} else {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3);
	}
	cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
	cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;
#endif
}
186 
187 #if __ARM_ARCH >= 6
188 /*
189  * Get bits that must be set or cleared in ACLR register.
190  * Note: Bits in ACLR register are IMPLEMENTATION DEFINED.
191  * Its expected that SCU is in operational state before this
192  * function is called.
193  */
194 static void
195 cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
196 {
197 
198 	*actlr_mask = 0;
199 	*actlr_set = 0;
200 
201 	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
202 		switch (cpuinfo.part_number) {
203 		case CPU_ARCH_CORTEX_A73:
204 		case CPU_ARCH_CORTEX_A72:
205 		case CPU_ARCH_CORTEX_A57:
206 		case CPU_ARCH_CORTEX_A53:
207 			/* Nothing to do for AArch32 */
208 			break;
209 		case CPU_ARCH_CORTEX_A17:
210 		case CPU_ARCH_CORTEX_A12: /* A12 is merged to A17 */
211 			/*
212 			 * Enable SMP mode
213 			 */
214 			*actlr_mask = (1 << 6);
215 			*actlr_set = (1 << 6);
216 			break;
217 		case CPU_ARCH_CORTEX_A15:
218 			/*
219 			 * Enable snoop-delayed exclusive handling
220 			 * Enable SMP mode
221 			 */
222 			*actlr_mask = (1U << 31) |(1 << 6);
223 			*actlr_set = (1U << 31) |(1 << 6);
224 			break;
225 		case CPU_ARCH_CORTEX_A9:
226 			/*
227 			 * Disable exclusive L1/L2 cache control
228 			 * Enable SMP mode
229 			 * Enable Cache and TLB maintenance broadcast
230 			 */
231 			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
232 			*actlr_set = (1 << 6) | (1 << 0);
233 			break;
234 		case CPU_ARCH_CORTEX_A8:
235 			/*
236 			 * Enable L2 cache
237 			 * Enable L1 data cache hardware alias checks
238 			 */
239 			*actlr_mask = (1 << 1) | (1 << 0);
240 			*actlr_set = (1 << 1);
241 			break;
242 		case CPU_ARCH_CORTEX_A7:
243 			/*
244 			 * Enable SMP mode
245 			 */
246 			*actlr_mask = (1 << 6);
247 			*actlr_set = (1 << 6);
248 			break;
249 		case CPU_ARCH_CORTEX_A5:
250 			/*
251 			 * Disable exclusive L1/L2 cache control
252 			 * Enable SMP mode
253 			 * Enable Cache and TLB maintenance broadcast
254 			 */
255 			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
256 			*actlr_set = (1 << 6) | (1 << 0);
257 			break;
258 		case CPU_ARCH_ARM1176:
259 			/*
260 			 * Restrict cache size to 16KB
261 			 * Enable the return stack
262 			 * Enable dynamic branch prediction
263 			 * Enable static branch prediction
264 			 */
265 			*actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
266 			*actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
267 			break;
268 		}
269 		return;
270 	}
271 }
272 
273 /* Reinitialize MMU to final kernel mapping and apply all CPU quirks. */
274 void
275 cpuinfo_reinit_mmu(uint32_t ttb)
276 {
277 	uint32_t actlr_mask;
278 	uint32_t actlr_set;
279 
280 	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
281 	actlr_mask |= cpu_quirks_actlr_mask;
282 	actlr_set |= cpu_quirks_actlr_set;
283 	reinit_mmu(ttb, actlr_mask, actlr_set);
284 }
285 
286 #endif /* __ARM_ARCH >= 6 */
287