/*	$OpenBSD: cpufunc.c,v 1.58 2025/01/20 20:13:29 kettenis Exp $	*/
/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

#include <machine/pmap.h>
#include <arm/cpuconf.h>

#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;
#endif

/* PRIMARY CACHE VARIABLES */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;
int	arm_pcache_unified;

int	arm_dcache_align;
int	arm_dcache_align_mask;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;

struct cpu_functions armv7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_auxcontrol,		/* aux control		*/
	cpufunc_domains,		/* Domain		*/
	armv7_setttb,			/* Setttb		*/
	cpufunc_dfsr,			/* dfsr			*/
	cpufunc_dfar,			/* dfar			*/
	cpufunc_ifsr,			/* ifsr			*/
	cpufunc_ifar,			/* ifar			*/

	/* TLB functions */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushI		*/
	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	armv7_tlb_flushD,		/* tlb_flushD		*/
	armv7_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv7_icache_sync_all,		/* icache_sync_all	*/
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,			/* sdcache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* sdcache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* sdcache_inv_range	*/
	(void *)cpufunc_nullop,		/* sdcache_wb_range	*/
	(void *)cpufunc_nullop,		/* sdcache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv7_drain_writebuf,		/* drain_writebuf	*/

	armv7_cpu_sleep,		/* sleep (wait for interrupt) */

	/* Soft functions */
	armv7_context_switch,		/* context_switch	*/
	armv7_setup			/* cpu setup		*/
};
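
/*
 * set_cpufuncs() copies this table into the global "cpufuncs"
 * below; callers then reach these routines through the cpu_*()
 * wrapper macros (e.g. cpu_idcache_wbinv_all()) declared in
 * <arm/cpufunc.h>.
 */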

/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;

int	arm_icache_min_line_size = 32;
int	arm_dcache_min_line_size = 32;
int	arm_idcache_min_line_size = 32;

void arm_get_cachetype_cp15v7 (void);
int	arm_dcache_l2_nsets;
int	arm_dcache_l2_assoc;
int	arm_dcache_l2_linesize;

/*
 * Base 2 logarithm of an int; returns 0 for 0 (yes, by design),
 * e.g. log2(1) == 0, log2(32) == 5, log2(33) == 5.
 */
static int
log2(unsigned int i)
{
	int ret = 0;

	while (i >>= 1)
		ret++;

	return (ret);
}

void
arm_get_cachetype_cp15v7(void)
{
	uint32_t ctype;
	uint32_t cachereg;
	uint32_t cache_level_id;
	uint32_t sets;
	uint32_t sel, level;

	/* CTR - Cache Type Register */
	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

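	/*
	 * CTR.DminLine/IminLine hold log2(words) of the smallest
	 * D/I-cache line; "+ 2" below converts words to bytes.
	 */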
	arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
	arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
	arm_idcache_min_line_size =
	    min(arm_icache_min_line_size, arm_dcache_min_line_size);

	/* CLIDR - Cache Level ID Register */
	__asm volatile("mrc p15, 1, %0, c0, c0, 1"
		: "=r" (cache_level_id) :);
	cpu_drain_writebuf();

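	/*
	 * CLIDR.Ctype1 (bits [2:0]) describes the L1 cache:
	 * 0b001 I-cache only, 0b010 D-cache only, 0b011 separate
	 * I and D caches, 0b100 unified; decoded bit by bit below.
	 */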
	/* L1 Cache available. */
	level = 0;
	if (cache_level_id & (0x7 << level)) {
		/* Unified cache. */
		if (cache_level_id & (0x4 << level))
			arm_pcache_unified = 1;

		/* Unified or data cache separate. */
		if (cache_level_id & (0x4 << level) ||
		    cache_level_id & (0x2 << level)) {
			sel = level << 1 | 0 << 0; /* L1 | unified/data cache */
			/* CSSELR - Cache Size Selection Register */
			__asm volatile("mcr p15, 2, %0, c0, c0, 0"
				:: "r" (sel));
			cpu_drain_writebuf();
			/* CCSIDR - Cache Size Identification Register */
			__asm volatile("mrc p15, 1, %0, c0, c0, 0"
			: "=r" (cachereg) :);
			cpu_drain_writebuf();
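			/*
			 * CCSIDR layout: [31:28] WT/WB/RA/WA flags,
			 * [27:13] NumSets - 1, [12:3] Associativity - 1,
			 * [2:0] log2(words per line) - 2.
			 */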
			sets = ((cachereg >> 13) & 0x7fff) + 1;
			arm_pdcache_line_size = 1 << ((cachereg & 0x7) + 4);
			arm_pdcache_ways = ((cachereg >> 3) & 0x3ff) + 1;
			arm_pdcache_size = arm_pdcache_line_size * arm_pdcache_ways * sets;
			switch (cachereg & 0xc0000000) {
			case 0x00000000:
				arm_pcache_type = 0;
				break;
			case 0x40000000:
			case 0xc0000000:
				arm_pcache_type = CPU_CT_CTYPE_WB1;
				break;
			case 0x80000000:
				arm_pcache_type = CPU_CT_CTYPE_WT;
				break;
			}
		}

		/* Instruction cache separate. */
		if (cache_level_id & (0x1 << level)) {
			sel = level << 1 | 1 << 0; /* L1 | instruction cache */
			/* CSSELR - Cache Size Selection Register */
			__asm volatile("mcr p15, 2, %0, c0, c0, 0"
				:: "r" (sel));
			cpu_drain_writebuf();
			/* CCSIDR - Cache Size Identification Register */
			__asm volatile("mrc p15, 1, %0, c0, c0, 0"
			: "=r" (cachereg) :);
			cpu_drain_writebuf();
			sets = ((cachereg >> 13) & 0x7fff) + 1;
			arm_picache_line_size = 1 << ((cachereg & 0x7) + 4);
			arm_picache_ways = ((cachereg >> 3) & 0x3ff) + 1;
			arm_picache_size = arm_picache_line_size * arm_picache_ways * sets;
		}
	}

	arm_dcache_align = arm_pdcache_line_size;
	arm_dcache_align_mask = arm_dcache_align - 1;

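	/*
	 * arm_dcache_l2_nsets is a set count; the _assoc and
	 * _linesize fields hold log2 values (see set_cpufuncs()).
	 */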
	arm_dcache_l2_nsets = arm_pdcache_size/arm_pdcache_ways/arm_pdcache_line_size;
	arm_dcache_l2_assoc = log2(arm_pdcache_ways);
	arm_dcache_l2_linesize = log2(arm_pdcache_line_size);
}

/*
 * Write back and invalidate the entire I and D caches.
 */
void
armv7_idcache_wbinv_all(void)
{
	uint32_t arg;
	arg = 0;
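	/* ICIALLU: invalidate the entire I-cache to the point of unification. */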
	__asm volatile("mcr	p15, 0, %0, c7, c5, 0" :: "r" (arg));
	armv7_dcache_wbinv_all();
}

/* brute force cache flushing */
void
armv7_dcache_wbinv_all(void)
{
	int sets, ways, lvl;
	int nsets, nways;
	uint32_t wayincr, setincr;
	uint32_t wayval, setval;
	uint32_t word;

	nsets = arm_dcache_l2_nsets;
	nways = arm_pdcache_ways;

	setincr = armv7_dcache_sets_inc;
	wayincr = armv7_dcache_index_inc;

#if 0
	printf("l1 nsets %d nways %d wayincr %x setincr %x\n",
	    nsets, nways, wayincr, setincr);
#endif

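	/*
	 * Set/way operand: way index in the top bits (stepped by
	 * wayincr), set index above the line-offset bits (stepped
	 * by setincr), cache level in bits [3:1].
	 */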
	lvl = 0; /* L1 */
	setval = 0;
	for (sets = 0; sets < nsets; sets++) {
		wayval = 0;
		for (ways = 0; ways < nways; ways++) {
			word = wayval | setval | lvl;

			/* Clean D cache SE with Set/Index */
			__asm volatile("mcr	p15, 0, %0, c7, c10, 2"
			    : : "r" (word));
			wayval += wayincr;
		}
		setval += setincr;
	}
	/* drain the write buffer */
	cpu_drain_writebuf();

	/* L2 cache flushing removed. Our current L2 caches are separate. */
}


/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

	if ((cputype & CPU_ID_ARCH_MASK) == CPU_ID_ARCH_CPUID) {
		uint32_t mmfr0;

		__asm volatile("mrc p15, 0, %0, c0, c1, 4"
			: "=r" (mmfr0));

		switch (mmfr0 & ID_MMFR0_VMSA_MASK) {
		case VMSA_V7:
		case VMSA_V7_PXN:
		case VMSA_V7_LDT:
			cpufuncs = armv7_cpufuncs;
			/* ARMv7 VMSA. */
			arm_get_cachetype_cp15v7();
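			/*
			 * Precompute set/way increments: the way index
			 * occupies the top log2(ways) bits of the set/way
			 * operand, the set index sits just above the
			 * line-offset bits.
			 */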
			armv7_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
			armv7_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
			    arm_dcache_l2_nsets)) - armv7_dcache_sets_inc;
			armv7_dcache_index_inc = 1U << (32 -
			    arm_dcache_l2_assoc);
			armv7_dcache_index_max = 0U - armv7_dcache_index_inc;
			pmap_pte_init_armv7();

			/* Use powersave on this CPU. */
			cpu_do_powersave = 1;
			return 0;
		}
	}
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);
}

/*
 * CPU Setup code
 */

void
armv7_setup(void)
{
	uint32_t auxctrl, auxctrlmask;
	uint32_t cpuctrl, cpuctrlmask;
	uint32_t id_pfr1;

	auxctrl = auxctrlmask = 0;

	switch (cputype & CPU_ID_CORTEX_MASK) {
	case CPU_ID_CORTEX_A5:
	case CPU_ID_CORTEX_A9:
		/* Cache and TLB maintenance broadcast */
#ifdef notyet
		auxctrlmask |= CORTEXA9_AUXCTL_FW;
		auxctrl |= CORTEXA9_AUXCTL_FW;
#endif
		/* FALLTHROUGH */
	case CPU_ID_CORTEX_A7:
	case CPU_ID_CORTEX_A12:
	case CPU_ID_CORTEX_A15:
	case CPU_ID_CORTEX_A17:
		/* Set SMP to allow LDREX/STREX */
		auxctrlmask |= CORTEXA9_AUXCTL_SMP;
		auxctrl |= CORTEXA9_AUXCTL_SMP;
		break;
	}

	cpuctrlmask = CPU_CONTROL_MMU_ENABLE
	    | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_IC_ENABLE
	    | CPU_CONTROL_VECRELOC
	    | CPU_CONTROL_TRE
	    | CPU_CONTROL_AFE;

	cpuctrl = CPU_CONTROL_MMU_ENABLE
	    | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_IC_ENABLE
	    | CPU_CONTROL_AFE;

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/*
	 * Check for the Virtualization Extensions and enable UWXN if
	 * they are included.
	 */
	__asm volatile("mrc p15, 0, %0, c0, c1, 1" : "=r"(id_pfr1));
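	/* ID_PFR1[15:12] == 0x1: Virtualization Extensions implemented. */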
	if ((id_pfr1 & 0x0000f000) == 0x00001000) {
		cpuctrlmask |= CPU_CONTROL_UWXN;
		cpuctrl |= CPU_CONTROL_UWXN;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the auxiliary control register first, as the SMP bit
	 * needs to be set to 1 before the caches and the MMU are
	 * enabled.
	 */
	cpu_auxcontrol(auxctrlmask, auxctrl);

	/* Set the control register */
	cpu_control(cpuctrlmask, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
429