xref: /freebsd/sys/arm/arm/cpufunc.c (revision 1d386b48)
1 /*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * arm9 support code Copyright (C) 2001 ARM Ltd
7  * Copyright (c) 1997 Mark Brinicombe.
8  * Copyright (c) 1997 Causality Limited
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by Causality Limited.
22  * 4. The name of Causality Limited may not be used to endorse or promote
23  *    products derived from this software without specific prior written
24  *    permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
27  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
30  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * RiscBSD kernel project
39  *
40  * cpufuncs.c
41  *
42  * C functions for supporting CPU / MMU / TLB specific operations.
43  *
44  * Created      : 30/01/97
45  */
46 #include <sys/cdefs.h>
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/bus.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
55 
56 #include <vm/vm.h>
57 #include <vm/pmap.h>
58 #include <vm/uma.h>
59 
60 #include <machine/cpufunc.h>
61 
/* PRIMARY CACHE VARIABLES */

/* Data-cache line size in bytes; 0 until probed (or if no d-cache). */
int	arm_dcache_align;
/* arm_dcache_align - 1, used to round addresses to cache-line boundaries. */
int	arm_dcache_align_mask;

/* Per-CPU setup routines, installed as cf_setup in the tables below. */
#ifdef CPU_MV_PJ4B
static void pj4bv7_setup(void);
#endif
#if defined(CPU_ARM1176)
static void arm11x6_setup(void);
#endif
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static void cortexa_setup(void);
#endif
76 
#ifdef CPU_MV_PJ4B
/*
 * Marvell PJ4B (ARMv7) function table.  All L2 cache callbacks default
 * to no-ops here.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = (void *)cpufunc_nullop,	/* no low-power idle hook */

	/* Soft functions */
	.cf_setup = pj4bv7_setup
};
#endif /* CPU_MV_PJ4B */
93 
#if defined(CPU_ARM1176)
/*
 * ARM1176 function table.  All L2 cache callbacks default to no-ops;
 * sleep is handled by the arm11x6 wait-for-interrupt routine.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = arm11x6_sleep,

	/* Soft functions */
	.cf_setup = arm11x6_setup
};
#endif /*CPU_ARM1176 */
110 
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Cortex-A / Krait function table.  L2 cache callbacks start out as
 * no-ops; on PL310-equipped systems they are replaced once the L2
 * cache controller is enabled.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* Cache operations */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,	/* cast for consistency with the other tables */
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = armv7_cpu_sleep,

	/* Soft functions */
	.cf_setup = cortexa_setup
};
#endif /* CPU_CORTEXA || CPU_KRAIT */
132 
/*
 * Global constants also used by locore.s
 */

/* Active function table, copied from one of the per-CPU tables above. */
struct cpu_functions cpufuncs;
/* MIDR value masked with CPU_ID_CPU_MASK; set by set_cpufuncs(). */
u_int cputype;

static void get_cachetype_cp15(void);
141 
/*
 * Probe the primary-cache geometry through the CP15 identification
 * registers and record the data-cache line size in arm_dcache_align
 * and arm_dcache_align_mask.
 */
static void
get_cachetype_cp15(void)
{
	u_int ctype, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_char type;

	ctype = cp15_ctr_get();		/* Cache Type Register */
	cpuid = cp15_midr_get();	/* Main ID Register */
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;	/* No CTR implemented; leave align at 0. */

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* ARMv7 scheme: read CLIDR and walk up to 7 cache levels. */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		i = 0;
		/* Each level occupies 3 bits of CLIDR; type 0 terminates. */
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				sel = i << 1;	/* even CSSELR: data/unified */
				/* Select the cache in CSSELR... */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				/* ...then read its geometry from CCSIDR. */
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				/* Line size is log2(words) - 2, so +4 for bytes. */
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				sel = (i << 1) | 1;	/* odd CSSELR: icache */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				/* csize read but unused for the icache. */
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
			}
			i++;
			clevel >>= 3;
		}
	} else {
		/*
		 * Pre-ARMv7 CTR format.  If you want to know how this
		 * code works, go read the ARM ARM.
		 */

		dsize = CPU_CT_DSIZE(ctype);
		arm_dcache_align = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_dcache_align = 0; /* not present */
		}

	/* NOTE: label sits inside the else arm; the early goto lands here. */
	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
203 
/*
 * Cannot panic here as we may not have a console yet ...
 */

/*
 * Identify the CPU from its MIDR and install the matching cpu_functions
 * table, then probe the cache geometry.  Returns 0 on success; panics
 * if the CPU is not supported by this kernel configuration (the
 * ARCHITECTURE_NOT_PRESENT return after panic is unreachable).
 */
int
set_cpufuncs(void)
{
	cputype = cp15_midr_get();
	cputype &= CPU_ID_CPU_MASK;

#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	/* Match on the part-number scheme, ignoring revision fields. */
	switch(cputype & CPU_ID_SCHEME_MASK) {
	case CPU_ID_CORTEXA5:
	case CPU_ID_CORTEXA7:
	case CPU_ID_CORTEXA8:
	case CPU_ID_CORTEXA9:
	case CPU_ID_CORTEXA12:
	case CPU_ID_CORTEXA15:
	case CPU_ID_CORTEXA53:
	case CPU_ID_CORTEXA57:
	case CPU_ID_CORTEXA72:
	case CPU_ID_KRAIT300:
		cpufuncs = cortexa_cpufuncs;
		get_cachetype_cp15();
		goto out;
	default:
		break;
	}
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);
out:
	/* Tell UMA our cache-line mask so allocations are cache-aligned. */
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
260 
261 /*
262  * CPU Setup code
263  */
264 
265 
#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Enable the performance-monitor cycle counter (CCNT) so it can be
 * used as a cheap timestamp source.
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	cp15_pmcr_set(5);
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
#endif
295 
#if defined(CPU_ARM1176)
/*
 * ARM1176 setup: apply an auxiliary-control-register errata workaround
 * where required, then enable the cycle counter.
 */
static void
arm11x6_setup(void)
{
	uint32_t set_bits, keep_mask;
	uint32_t actlr_old, actlr_new;

	/* Default: change nothing in ACTLR. */
	set_bits = 0;
	keep_mask = ~(uint32_t)0;

	/*
	 * Enable an errata workaround
	 */
	if ((cp15_midr_get() & CPU_ID_CPU_MASK) ==
	    CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		set_bits = ARM1176_AUXCTL_PHD;
		keep_mask = ~ARM1176_AUXCTL_PHD;
	}

	/* Read-modify-write ACTLR, touching it only on an actual change. */
	actlr_old = cp15_actlr_get();
	actlr_new = (actlr_old & keep_mask) | set_bits;
	if (actlr_new != actlr_old)
		cp15_actlr_set(actlr_new);

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_ARM1176 */
327 
#ifdef CPU_MV_PJ4B
/*
 * Marvell PJ4B setup: run the PJ4B-specific configuration and enable
 * the cycle counter.
 */
static void
pj4bv7_setup(void)
{

	pj4b_config();
	cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */
337 
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Cortex-A / Krait setup: only the cycle counter needs enabling here.
 */
static void
cortexa_setup(void)
{

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_CORTEXA || CPU_KRAIT */
346