xref: /freebsd/sys/arm/arm/cpufunc.c (revision 06c3fb27)
1 /*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * arm9 support code Copyright (C) 2001 ARM Ltd
7  * Copyright (c) 1997 Mark Brinicombe.
8  * Copyright (c) 1997 Causality Limited
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by Causality Limited.
22  * 4. The name of Causality Limited may not be used to endorse or promote
23  *    products derived from this software without specific prior written
24  *    permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
27  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
30  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * RiscBSD kernel project
39  *
40  * cpufuncs.c
41  *
42  * C functions for supporting CPU / MMU / TLB specific operations.
43  *
44  * Created      : 30/01/97
45  */
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/bus.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
55 
56 #include <vm/vm.h>
57 #include <vm/pmap.h>
58 #include <vm/uma.h>
59 
60 #include <machine/cpufunc.h>
61 
/* PRIMARY CACHE VARIABLES */

/*
 * D-cache line size in bytes and the matching mask (size - 1), filled
 * in at boot by get_cachetype_cp15() and consumed by cache maintenance
 * code elsewhere in the kernel.
 */
unsigned int	arm_dcache_align;
unsigned int	arm_dcache_align_mask;

/* Per-CPU setup routines, installed as cf_setup in the tables below. */
#ifdef CPU_MV_PJ4B
static void pj4bv7_setup(void);
#endif
#if defined(CPU_ARM1176)
static void arm11x6_setup(void);
#endif
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static void cortexa_setup(void);
#endif
76 
#ifdef CPU_MV_PJ4B
/*
 * Function table for Marvell PJ4B (ARMv7) cores.  All L2 cache hooks
 * are stubbed out with the no-op function.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */
	.cf_setup = pj4bv7_setup
};
#endif /* CPU_MV_PJ4B */
93 
#if defined(CPU_ARM1176)
/*
 * Function table for ARM1176 cores.  L2 cache hooks are stubbed out
 * with the no-op function; sleep is the ARM11x6-specific routine.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = arm11x6_sleep,

	/* Soft functions */
	.cf_setup = arm11x6_setup
};
#endif /*CPU_ARM1176 */
110 
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/* Function table for Cortex-A and Qualcomm Krait cores. */
struct cpu_functions cortexa_cpufuncs = {
	/* Cache operations */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	.cf_l2cache_wbinv_all = cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = armv7_cpu_sleep,

	/* Soft functions */
	.cf_setup = cortexa_setup
};
#endif /* CPU_CORTEXA || CPU_KRAIT */
132 
/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;	/* active table, installed by set_cpufuncs() */
u_int cputype;			/* MIDR value masked with CPU_ID_CPU_MASK */

static void get_cachetype_cp15(void);
141 
/*
 * Probe the CP15 cache identification registers and initialize the
 * global arm_dcache_align / arm_dcache_align_mask values.
 */
static void
get_cachetype_cp15(void)
{
	u_int ctype, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_char type;

	ctype = cp15_ctr_get();		/* CTR: cache type register */
	cpuid = cp15_midr_get();	/* MIDR: main ID register */
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;	/* no CTR; arm_dcache_align stays 0 */

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* ARMv7 format: read CLIDR to walk the cache hierarchy. */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		i = 0;
		/* CLIDR describes each cache level in successive 3-bit fields. */
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				/* Select D/unified cache at level i via CSSELR... */
				sel = i << 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				/* ...then read its geometry from CCSIDR. */
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				/*
				 * CCSIDR LineSize is log2(words/line) - 2,
				 * so +4 yields log2(bytes/line).
				 */
				arm_dcache_align = 1U <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				/* Select the I-cache at level i. */
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				/* I-cache CCSIDR is read but currently unused. */
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
			}
			i++;
			clevel >>= 3;	/* advance to the next level's field */
		}
	} else {
		/*
		 * Pre-ARMv7 cache type register format.
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		dsize = CPU_CT_DSIZE(ctype);
		arm_dcache_align = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_dcache_align = 0; /* not present */
		}
	}

out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
202 
/*
 * Cannot panic here as we may not have a console yet ...
 */

/*
 * Identify the CPU from MIDR, install the matching cpu_functions table
 * into the global 'cpufuncs', and probe the cache geometry.  Returns 0
 * on success; panics if the running CPU is not supported by this
 * kernel configuration.
 */
int
set_cpufuncs(void)
{
	cputype = cp15_midr_get();
	cputype &= CPU_ID_CPU_MASK;

#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	/* Match on the implementer/part scheme rather than the full ID. */
	switch(cputype & CPU_ID_SCHEME_MASK) {
	case CPU_ID_CORTEXA5:
	case CPU_ID_CORTEXA7:
	case CPU_ID_CORTEXA8:
	case CPU_ID_CORTEXA9:
	case CPU_ID_CORTEXA12:
	case CPU_ID_CORTEXA15:
	case CPU_ID_CORTEXA53:
	case CPU_ID_CORTEXA57:
	case CPU_ID_CORTEXA72:
	case CPU_ID_KRAIT300:
		cpufuncs = cortexa_cpufuncs;
		get_cachetype_cp15();
		goto out;
	default:
		break;
	}
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	/* NOTREACHED: panic() does not return. */
	return(ARCHITECTURE_NOT_PRESENT);
out:
	/* Let UMA align its caches to the discovered D-cache line size. */
	uma_set_cache_align_mask(arm_dcache_align_mask);
	return (0);
}
259 
260 /*
261  * CPU Setup code
262  */
263 
264 
#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Common helper for the per-CPU setup routines: enable the performance
 * monitor cycle counter (CCNT), and optionally open it up to userland.
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	cp15_pmcr_set(5);
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
#endif
294 
#if defined(CPU_ARM1176)
/*
 * CPU setup for ARM1176: apply the errata workaround (the PHD bit in
 * the auxiliary control register) on the affected revision, then
 * enable the cycle counter.
 */
static void
arm11x6_setup(void)
{
	uint32_t set_bits, keep_mask;
	uint32_t actlr_old, actlr_new;

	/* Default: leave ACTLR untouched. */
	set_bits = 0;
	keep_mask = ~0;

	/*
	 * Enable an errata workaround
	 */
	if ((cp15_midr_get() & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		set_bits = ARM1176_AUXCTL_PHD;
		keep_mask = ~ARM1176_AUXCTL_PHD;
	}

	/* Read-modify-write ACTLR, skipping the write when nothing changed. */
	actlr_old = cp15_actlr_get();
	actlr_new = (actlr_old & keep_mask) | set_bits;
	if (actlr_new != actlr_old)
		cp15_actlr_set(actlr_new);

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_ARM1176 */
326 
#ifdef CPU_MV_PJ4B
/*
 * CPU setup for Marvell PJ4B: apply the PJ4B-specific configuration
 * and enable the cycle counter.
 */
static void
pj4bv7_setup(void)
{

	pj4b_config();
	cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */
336 
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * CPU setup for Cortex-A / Krait: nothing to configure here beyond
 * enabling the cycle counter.
 */
static void
cortexa_setup(void)
{

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_CORTEXA || CPU_KRAIT */
345