/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/disassem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>

#include <machine/cpufunc.h>

/* PRIMARY CACHE VARIABLES */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;
int	arm_pcache_unified;

int	arm_dcache_align;
int	arm_dcache_align_mask;

u_int	arm_cache_level;
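/*
 * arm_cache_type[] is indexed by the v7 CSSELR selector value,
 * (level << 1) | InD, i.e. two entries for each of up to 7 levels.
 */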
u_int	arm_cache_type[14];
u_int	arm_cache_loc;

#if defined(CPU_ARM9E)
static void arm10_setup(void);
#endif
#ifdef CPU_MV_PJ4B
static void pj4bv7_setup(void);
#endif
#if defined(CPU_ARM1176)
static void arm11x6_setup(void);
#endif
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static void cortexa_setup(void);
#endif

#if defined(CPU_ARM9E)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv5_ec_setttb,		/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};

struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	sheeva_setttb,			/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all    */
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range  */
	sheeva_l2cache_inv_range,	/* l2cache_inv_range    */
	sheeva_l2cache_wb_range,	/* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
#endif /* CPU_ARM9E */

#ifdef CPU_MV_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */
	.cf_setup = pj4bv7_setup
};
#endif /* CPU_MV_PJ4B */

#if defined(CPU_ARM1176)
struct cpu_functions arm1176_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = arm11x6_sleep,

	/* Soft functions */
	.cf_setup = arm11x6_setup
};
#endif /* CPU_ARM1176 */

#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
struct cpu_functions cortexa_cpufuncs = {
	/* Cache operations */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	.cf_l2cache_wbinv_all = cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = armv7_cpu_sleep,

	/* Soft functions */
	.cf_setup = cortexa_setup
};
#endif /* CPU_CORTEXA || CPU_KRAIT */

/*
 * Global constants also used by locore.s
 */

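/*
 * The cpu_*() wrappers (e.g. cpu_idcache_wbinv_all(), used below)
 * dispatch through this table, so set_cpufuncs() must install the
 * matching table before any of them may be called.
 */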
struct cpu_functions cpufuncs;
u_int cputype;
#if __ARM_ARCH <= 5
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore-v4.s */
#endif

#if defined(CPU_ARM9E) ||	\
    defined(CPU_ARM1176) ||	\
    defined(CPU_MV_PJ4B) ||	\
    defined(CPU_CORTEXA) || defined(CPU_KRAIT)

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;

static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	ctype = cp15_ctr_get();
	cpuid = cp15_midr_get();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
		i = 0;
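		/*
		 * Walk the 3-bit Ctype fields in CLIDR, one per cache
		 * level.  For each level holding a data or unified
		 * cache, select it in CSSELR and read its geometry
		 * from CCSIDR.
		 */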
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				sel = i << 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
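				/*
				 * CCSIDR's LineSize field encodes
				 * log2(words per line) - 2, so LEN + 4
				 * is log2 of the line size in bytes
				 * (e.g. LEN = 1 means 32-byte lines).
				 */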
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			clevel >>= 3;
		}
	} else {
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		arm_pcache_type = CPU_CT_CTYPE(ctype);

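		/*
		 * In the pre-v7 cache type register the M bit selects
		 * cache sizes that are 3 (rather than 2) times a power
		 * of two, hence the multiplier of 3 vs. 2 below.
		 */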
		if (arm_pcache_unified == 0) {
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
#endif /* CPU_ARM9E || CPU_ARM1176 || CPU_MV_PJ4B || CPU_CORTEXA || CPU_KRAIT */

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	cputype = cp15_midr_get();
	cputype &= CPU_ID_CPU_MASK;

#if defined(CPU_ARM9E)
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	switch (cputype & CPU_ID_SCHEME_MASK) {
	case CPU_ID_CORTEXA5:
	case CPU_ID_CORTEXA7:
	case CPU_ID_CORTEXA8:
	case CPU_ID_CORTEXA9:
	case CPU_ID_CORTEXA12:
	case CPU_ID_CORTEXA15:
	case CPU_ID_CORTEXA53:
	case CPU_ID_CORTEXA57:
	case CPU_ID_CORTEXA72:
	case CPU_ID_KRAIT300:
		cpufuncs = cortexa_cpufuncs;
		get_cachetype_cp15();
		goto out;
	default:
		break;
	}
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return (ARCHITECTURE_NOT_PRESENT);
out:
	uma_set_align(arm_dcache_align_mask);
	return (0);
}

/*
 * CPU Setup code
 */

#if defined(CPU_ARM9E)
static void
arm10_setup(void)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean: invalidate both caches.  */
	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

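	/*
	 * cpu_control(mask, val) updates only the SCTLR bits selected
	 * by mask, so the all-ones mask below simply replaces the
	 * whole register with cpuctrl.
	 */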
	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
#endif	/* CPU_ARM9E */

#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	cp15_pmcr_set(5);
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
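
/*
 * Example (a sketch, not kernel code): with _PMC_USER_READ_WRITE_
 * enabled above, an ARMv7 userland program could read the cycle
 * counter directly, e.g.:
 *
 *	static inline uint32_t
 *	ccnt_read(void)
 *	{
 *		uint32_t cyc;
 *
 *		__asm __volatile("mrc p15, 0, %0, c9, c13, 0"
 *		    : "=r" (cyc));
 *		return (cyc);
 *	}
 */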
#endif

#if defined(CPU_ARM1176)
static void
arm11x6_setup(void)
{
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;
	uint32_t cpuid;

	cpuid = cp15_midr_get();

	auxctrl = 0;
	auxctrl_wax = ~0;

	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	}

	tmp = cp15_actlr_get();
	tmp2 = tmp;
	tmp &= auxctrl_wax;
	tmp |= auxctrl;
	if (tmp != tmp2)
		cp15_actlr_set(tmp);

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_ARM1176 */

#ifdef CPU_MV_PJ4B
static void
pj4bv7_setup(void)
{

	pj4b_config();
	cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static void
cortexa_setup(void)
{

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_CORTEXA || CPU_KRAIT */
593