xref: /freebsd/sys/arm/arm/cpufunc.c (revision 4d846d26)
1 /*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * arm9 support code Copyright (C) 2001 ARM Ltd
7  * Copyright (c) 1997 Mark Brinicombe.
8  * Copyright (c) 1997 Causality Limited
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by Causality Limited.
22  * 4. The name of Causality Limited may not be used to endorse or promote
23  *    products derived from this software without specific prior written
24  *    permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
27  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
30  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * RiscBSD kernel project
39  *
40  * cpufuncs.c
41  *
42  * C functions for supporting CPU / MMU / TLB specific operations.
43  *
44  * Created      : 30/01/97
45  */
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/lock.h>
52 #include <sys/mutex.h>
53 #include <sys/bus.h>
54 #include <machine/bus.h>
55 #include <machine/cpu.h>
56 #include <machine/disassem.h>
57 
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 #include <vm/uma.h>
61 
62 #include <machine/cpufunc.h>
63 
/* PRIMARY CACHE VARIABLES */

/* L1 data cache line size in bytes, and (line size - 1) used as a mask. */
int	arm_dcache_align;
int	arm_dcache_align_mask;

/* Per-CPU-family setup hooks, installed as cf_setup in the tables below. */
#ifdef CPU_MV_PJ4B
static void pj4bv7_setup(void);
#endif
#if defined(CPU_ARM1176)
static void arm11x6_setup(void);
#endif
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static void cortexa_setup(void);
#endif
78 
#ifdef CPU_MV_PJ4B
/*
 * Marvell PJ4B (ARMv7) function table: all L2 cache hooks and the
 * sleep hook are no-ops.  The (void *) casts are needed because
 * cpufunc_nullop's prototype differs from the member types.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */
	.cf_setup = pj4bv7_setup
};
#endif /* CPU_MV_PJ4B */
95 
#if defined(CPU_ARM1176)
/*
 * ARM1176 function table: L2 cache hooks are no-ops; sleep is the
 * ARM11-specific routine.  The (void *) casts are needed because
 * cpufunc_nullop's prototype differs from the member types.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = arm11x6_sleep,

	/* Soft functions */
	.cf_setup = arm11x6_setup
};
#endif /*CPU_ARM1176 */
112 
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Cortex-A / Krait function table: shared by all the ARMv7/v8 parts
 * matched in set_cpufuncs().
 */
struct cpu_functions cortexa_cpufuncs = {
	/* Cache operations */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	.cf_l2cache_wbinv_all = cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = armv7_cpu_sleep,

	/* Soft functions */
	.cf_setup = cortexa_setup
};
#endif /* CPU_CORTEXA || CPU_KRAIT */
134 
/*
 * Global constants also used by locore.s
 */

/* Active CPU/cache function table, installed by set_cpufuncs(). */
struct cpu_functions cpufuncs;
/* MIDR (main ID register) value, masked with CPU_ID_CPU_MASK. */
u_int cputype;

static void get_cachetype_cp15(void);
143 
/*
 * Probe the L1 data cache line size via the CP15 identification
 * registers and record it in arm_dcache_align / arm_dcache_align_mask.
 */
static void
get_cachetype_cp15(void)
{
	u_int ctype, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_char type;

	ctype = cp15_ctr_get();		/* cache type register (CTR) */
	cpuid = cp15_midr_get();	/* main ID register (MIDR) */
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* Read CLIDR: one 3-bit cache-type field per level. */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		i = 0;
		/* Walk up to 7 levels, stopping at the first "no cache". */
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				sel = i << 1;	/* CSSELR: level i, data/unified */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				/* Read CCSIDR for the selected cache. */
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				/* Convert the CCSIDR LineSize field to bytes. */
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				sel = (i << 1) | 1;	/* CSSELR: instruction side */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				/* Value is read but not otherwise used here. */
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
			}
			i++;
			clevel >>= 3;	/* next level's type field */
		}
	} else {
		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		dsize = CPU_CT_DSIZE(ctype);
		arm_dcache_align = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_dcache_align = 0; /* not present */
		}

	/*
	 * The early "goto out" above deliberately jumps into this else
	 * branch so the mask is still derived from arm_dcache_align.
	 * NOTE(review): on that path arm_dcache_align keeps its prior
	 * value (0 at boot), yielding a mask of ~0 — confirm intended.
	 */
	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
205 
/*
 * Cannot panic here as we may not have a console yet ...
 */

/*
 * Identify the running CPU from its masked MIDR value, install the
 * matching cpu_functions table, and probe the cache geometry.
 * Returns 0 on success; an unsupported CPU hits panic() below
 * (despite the historical comment above about not panicking).
 */
int
set_cpufuncs(void)
{
	cputype = cp15_midr_get();
	cputype &= CPU_ID_CPU_MASK;	/* strip revision/variant fields */

#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	/* All supported Cortex-A and Krait parts share one table. */
	switch(cputype & CPU_ID_SCHEME_MASK) {
	case CPU_ID_CORTEXA5:
	case CPU_ID_CORTEXA7:
	case CPU_ID_CORTEXA8:
	case CPU_ID_CORTEXA9:
	case CPU_ID_CORTEXA12:
	case CPU_ID_CORTEXA15:
	case CPU_ID_CORTEXA53:
	case CPU_ID_CORTEXA57:
	case CPU_ID_CORTEXA72:
	case CPU_ID_KRAIT300:
		cpufuncs = cortexa_cpufuncs;
		get_cachetype_cp15();
		goto out;
	default:
		break;
	}
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	/* NOTREACHED: panic() does not return. */
	return(ARCHITECTURE_NOT_PRESENT);
out:
	/* Have UMA align allocations to the D-cache line size. */
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
262 
263 /*
264  * CPU Setup code
265  */
266 
267 
#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Enable the performance-monitor cycle counter (CCNT); shared by all
 * the per-CPU setup routines below.
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	cp15_pmcr_set(5);
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
#endif
297 
#if defined(CPU_ARM1176)
/*
 * ARM1176 setup: apply the ARM1176JZ(F)-S auxiliary-control errata
 * workaround (PHD bit) when that part is detected, then enable the
 * cycle counter.  The ACTLR is only written back if it changes.
 */
static void
arm11x6_setup(void)
{
	uint32_t set_bits, keep_mask;
	uint32_t actlr_old, actlr_new;

	set_bits = 0;
	keep_mask = ~0;

	/*
	 * Enable an errata workaround
	 */
	if ((cp15_midr_get() & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		set_bits = ARM1176_AUXCTL_PHD;
		keep_mask = ~ARM1176_AUXCTL_PHD;
	}

	actlr_old = cp15_actlr_get();
	actlr_new = (actlr_old & keep_mask) | set_bits;
	if (actlr_new != actlr_old)
		cp15_actlr_set(actlr_new);

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_ARM1176 */
329 
#ifdef CPU_MV_PJ4B
/*
 * Marvell PJ4B setup: apply the PJ4B-specific configuration and
 * enable the cycle counter.
 */
static void
pj4bv7_setup(void)
{

	pj4b_config();
	cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */
339 
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Cortex-A / Krait setup: nothing CPU-specific beyond enabling the
 * cycle counter.
 */
static void
cortexa_setup(void)
{

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_CORTEXA || CPU_KRAIT */
348