xref: /netbsd/sys/arch/arm/arm/cpufunc.c (revision bf9ec67e)
1 /*	$NetBSD: cpufunc.c,v 1.45 2002/05/22 19:06:23 thorpej Exp $	*/
2 
3 /*
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
 * cpufunc.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 
48 #include "opt_compat_netbsd.h"
49 #include "opt_cpuoptions.h"
50 
51 #include <sys/types.h>
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <machine/cpu.h>
55 #include <machine/bootconfig.h>
56 #include <arch/arm/arm/disassem.h>
57 
58 #include <uvm/uvm.h>
59 
60 #include <arm/cpuconf.h>
61 #include <arm/cpufunc.h>
62 
63 #ifdef CPU_XSCALE_80200
64 #include <arm/xscale/i80200reg.h>
65 #include <arm/xscale/i80200var.h>
66 #endif
67 
68 #ifdef CPU_XSCALE_80321
69 #include <arm/xscale/i80321reg.h>
70 #include <arm/xscale/i80321var.h>
71 #endif
72 
73 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
74 #include <arm/xscale/xscalereg.h>
75 #endif
76 
77 /* PRIMARY CACHE VARIABLES */
78 int	arm_picache_size;
79 int	arm_picache_line_size;
80 int	arm_picache_ways;
81 
82 int	arm_pdcache_size;	/* and unified */
83 int	arm_pdcache_line_size;
84 int	arm_pdcache_ways;
85 
86 int	arm_pcache_type;
87 int	arm_pcache_unified;
88 
89 int	arm_dcache_align;
90 int	arm_dcache_align_mask;
91 
#ifdef CPU_ARM3
/*
 * Dispatch table for the ARM3.  The domain/setttb/fault slots are NULL
 * and every TLB operation is a no-op (no entries reference a TLB on
 * this core); all cache operations funnel into arm3_cache_flush.
 * cpufunc_nullop is cast where a slot expects a function with arguments.
 */
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	arm3_control,			/* control		*/
	NULL,				/* domain		*/
	NULL,				/* setttb		*/
	NULL,				/* faultstatus		*/
	NULL,				/* faultaddress		*/

	/* TLB functions */

	cpufunc_nullop,			/* tlb_flushID		*/
	(void *)cpufunc_nullop,		/* tlb_flushID_SE	*/
	cpufunc_nullop,			/* tlb_flushI		*/
	(void *)cpufunc_nullop,		/* tlb_flushI_SE	*/
	cpufunc_nullop,			/* tlb_flushD		*/
	(void *)cpufunc_nullop,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *) cpufunc_nullop,	/* icache_sync_range	*/

	arm3_cache_flush,		/* dcache_wbinv_all	*/
	(void *)arm3_cache_flush,	/* dcache_wbinv_range	*/
	(void *)arm3_cache_flush,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm3_cache_flush,		/* idcache_wbinv_all	*/
	(void *)arm3_cache_flush,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	early_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	NULL,				/* context_switch	*/

	(void *)cpufunc_nullop		/* cpu setup		*/

};
#endif	/* CPU_ARM3 */
149 
#ifdef CPU_ARM6
/*
 * Dispatch table for the ARM6, built on the arm67_* routines shared
 * with the ARM7.  The data-abort fixup is selected at compile time:
 * late_abort_fixup when ARM6_LATE_ABORT is defined, else the early
 * variant.
 */
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm67_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm67_tlb_flush,		/* tlb_flushID		*/
	arm67_tlb_purge,		/* tlb_flushID_SE	*/
	arm67_tlb_flush,		/* tlb_flushI		*/
	arm67_tlb_purge,		/* tlb_flushI_SE	*/
	arm67_tlb_flush,		/* tlb_flushD		*/
	arm67_tlb_purge,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *) cpufunc_nullop,	/* icache_sync_range	*/

	arm67_cache_flush,		/* dcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm67_cache_flush,		/* idcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	late_abort_fixup,		/* dataabt_fixup	*/
#else
	early_abort_fixup,		/* dataabt_fixup	*/
#endif
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm67_context_switch,		/* context_switch	*/

	arm6_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM6 */
211 
#ifdef CPU_ARM7
/*
 * Dispatch table for the ARM7.  Identical to the ARM6 table except that
 * the data-abort fixup is always the late variant and cpu setup goes
 * through arm7_setup.
 */
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm67_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm67_tlb_flush,		/* tlb_flushID		*/
	arm67_tlb_purge,		/* tlb_flushID_SE	*/
	arm67_tlb_flush,		/* tlb_flushI		*/
	arm67_tlb_purge,		/* tlb_flushI_SE	*/
	arm67_tlb_flush,		/* tlb_flushD		*/
	arm67_tlb_purge,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm67_cache_flush,		/* dcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* dcache_wbinv_range	*/
	(void *)arm67_cache_flush,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm67_cache_flush,		/* idcache_wbinv_all	*/
	(void *)arm67_cache_flush,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm67_context_switch,		/* context_switch	*/

	arm7_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM7 */
269 
#ifdef CPU_ARM7TDMI
/*
 * Dispatch table for the ARM7TDMI.  TLB and cache operations use the
 * combined flushID routines for both the I and D variants; per-entry
 * TLB flushes go through arm7tdmi_tlb_flushID_SE.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM7TDMI */
327 
#ifdef CPU_ARM8
/*
 * Dispatch table for the ARM8.  Note the XXX below: dcache_inv_range
 * falls back to a full purge (clean+invalidate) rather than a pure
 * invalidate.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
/*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/

	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
#endif	/* CPU_ARM8 */
384 
#ifdef CPU_ARM9
/*
 * Dispatch table for the ARM9 (ARM920T).  TLB maintenance uses the
 * generic armv4_* routines except for the per-entry ID flush; the
 * D-cache handlers assume write-through operation (see comment below),
 * so dcache_wb_range is a no-op.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm9_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_cache_syncI,		/* icache_sync_all	*/
	arm9_cache_syncI_rng,		/* icache_sync_range	*/

		/* ...cache in write-through mode... */
	arm9_cache_flushD,		/* dcache_wbinv_all	*/
	arm9_cache_flushD_rng,		/* dcache_wbinv_range	*/
	arm9_cache_flushD_rng,		/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm9_cache_flushID,		/* idcache_wbinv_all	*/
	arm9_cache_flushID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9 */
443 
#ifdef CPU_SA110
/*
 * Dispatch table for the StrongARM SA-110, using the sa1_* cache/TTB
 * routines shared with the SA-11x0 table below.  Note the XXX:
 * dcache_inv_range is implemented as a purge (clean+invalidate).
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
#endif	/* CPU_SA110 */
500 
#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Dispatch table shared by the SA-1100 and SA-1110.  Differs from the
 * SA-110 table in three slots: flush_prefetchbuf drains the read
 * buffer, sleep uses the SA-11x0 power-save hook, and context switch /
 * setup use the sa11x0_* variants.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sa11x0_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa11x0_context_switch,		/* context_switch	*/

	sa11x0_setup			/* cpu setup		*/
};
#endif	/* CPU_SA1100 || CPU_SA1110 */
557 
#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(CPU_XSCALE_PXA2X0)
/*
 * Dispatch table shared by all supported XScale cores (i80200, i80321,
 * PXA2x0).  Unlike the older cores, cpwait is a real coprocessor
 * drain (xscale_cpwait) and dcache_inv_range has a true invalidate
 * implementation -- though set_cpufuncs() overrides it with a purge on
 * buggy early i80200 steppings.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 */
615 
616 /*
617  * Global constants also used by locore.s
618  */
619 
620 struct cpu_functions cpufuncs;
621 u_int cputype;
622 u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
623 
624 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
625     defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
626     defined(CPU_XSCALE_PXA2X0)
627 static void get_cachetype_cp15 __P((void));
628 
629 static void
630 get_cachetype_cp15()
631 {
632 	u_int ctype, isize, dsize;
633 	u_int multiplier;
634 
635 	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
636 		: "=r" (ctype));
637 
638 	/*
639 	 * ...and thus spake the ARM ARM:
640 	 *
641 	 * If an <opcode2> value corresponding to an unimplemented or
642 	 * reserved ID register is encountered, the System Control
643 	 * processor returns the value of the main ID register.
644 	 */
645 	if (ctype == cpufunc_id())
646 		goto out;
647 
648 	if ((ctype & CPU_CT_S) == 0)
649 		arm_pcache_unified = 1;
650 
651 	/*
652 	 * If you want to know how this code works, go read the ARM ARM.
653 	 */
654 
655 	arm_pcache_type = CPU_CT_CTYPE(ctype);
656 
657 	if (arm_pcache_unified == 0) {
658 		isize = CPU_CT_ISIZE(ctype);
659 		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
660 		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
661 		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
662 			if (isize & CPU_CT_xSIZE_M)
663 				arm_picache_line_size = 0; /* not present */
664 			else
665 				arm_picache_ways = 1;
666 		} else {
667 			arm_picache_ways = multiplier <<
668 			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
669 		}
670 		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
671 	}
672 
673 	dsize = CPU_CT_DSIZE(ctype);
674 	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
675 	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
676 	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
677 		if (dsize & CPU_CT_xSIZE_M)
678 			arm_pdcache_line_size = 0; /* not present */
679 		else
680 			arm_pdcache_ways = 0;
681 	} else {
682 		arm_pdcache_ways = multiplier <<
683 		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
684 	}
685 	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
686 
687 	arm_dcache_align = arm_pdcache_line_size;
688 
689  out:
690 	arm_dcache_align_mask = arm_dcache_align - 1;
691 }
692 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
693 
694 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
695     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
696     defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Cache information for CPUs without cache type registers; one entry
 * per known CPU id, consumed by get_cachetype_table() below.
 */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU id (CPU_ID_CPU_MASK bits) */
	int	ct_pcache_type;		/* CPU_CT_CTYPE_* value */
	int	ct_pcache_unified;	/* 1 if I and D caches are unified */
	int	ct_pdcache_size;	/* D (or unified) cache size, bytes */
	int	ct_pdcache_line_size;	/* D cache line size, bytes */
	int	ct_pdcache_ways;	/* D cache associativity */
	int	ct_picache_size;	/* I cache size, bytes (0 if unified) */
	int	ct_picache_line_size;	/* I cache line size, bytes */
	int	ct_picache_ways;	/* I cache associativity */
};
709 
/* Table terminated by an all-zero (ct_cpuid == 0) sentinel entry. */
struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { 0, 0, 0, 0, 0, 0, 0, 0}
};
726 
727 static void get_cachetype_table __P((void));
728 
729 static void
730 get_cachetype_table()
731 {
732 	int i;
733 	u_int32_t cpuid = cpufunc_id();
734 
735 	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
736 		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
737 			arm_pcache_type = cachetab[i].ct_pcache_type;
738 			arm_pcache_unified = cachetab[i].ct_pcache_unified;
739 			arm_pdcache_size = cachetab[i].ct_pdcache_size;
740 			arm_pdcache_line_size =
741 			    cachetab[i].ct_pdcache_line_size;
742 			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
743 			arm_picache_size = cachetab[i].ct_picache_size;
744 			arm_picache_line_size =
745 			    cachetab[i].ct_picache_line_size;
746 			arm_picache_ways = cachetab[i].ct_picache_ways;
747 		}
748 	}
749 	arm_dcache_align = arm_pdcache_line_size;
750 
751 	arm_dcache_align_mask = arm_dcache_align - 1;
752 }
753 
#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 */
755 
756 /*
757  * Cannot panic here as we may not have a console yet ...
758  */
759 
760 int
761 set_cpufuncs()
762 {
763 	cputype = cpufunc_id();
764 	cputype &= CPU_ID_CPU_MASK;
765 
766 
767 #ifdef CPU_ARM3
768 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
769 	    (cputype & 0x00000f00) == 0x00000300) {
770 		cpufuncs = arm3_cpufuncs;
771 		cpu_reset_needs_v4_MMU_disable = 0;
772 		get_cachetype_table();
773 		return 0;
774 	}
775 #endif	/* CPU_ARM3 */
776 #ifdef CPU_ARM6
777 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
778 	    (cputype & 0x00000f00) == 0x00000600) {
779 		cpufuncs = arm6_cpufuncs;
780 		cpu_reset_needs_v4_MMU_disable = 0;
781 		get_cachetype_table();
782 		pmap_pte_init_generic();
783 		return 0;
784 	}
785 #endif	/* CPU_ARM6 */
786 #ifdef CPU_ARM7
787 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
788 	    CPU_ID_IS7(cputype) &&
789 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
790 		cpufuncs = arm7_cpufuncs;
791 		cpu_reset_needs_v4_MMU_disable = 0;
792 		get_cachetype_table();
793 		pmap_pte_init_generic();
794 		return 0;
795 	}
796 #endif	/* CPU_ARM7 */
797 #ifdef CPU_ARM7TDMI
798 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
799 	    CPU_ID_IS7(cputype) &&
800 	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
801 		cpufuncs = arm7tdmi_cpufuncs;
802 		cpu_reset_needs_v4_MMU_disable = 0;
803 		get_cachetype_cp15();
804 		pmap_pte_init_generic();
805 		return 0;
806 	}
807 #endif
808 #ifdef CPU_ARM8
809 	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
810 	    (cputype & 0x0000f000) == 0x00008000) {
811 		cpufuncs = arm8_cpufuncs;
812 		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
813 		get_cachetype_cp15();
814 		pmap_pte_init_generic();
815 		return 0;
816 	}
817 #endif	/* CPU_ARM8 */
818 #ifdef CPU_ARM9
819 	if (cputype == CPU_ID_ARM920T) {
820 		cpufuncs = arm9_cpufuncs;
821 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
822 		get_cachetype_cp15();
823 		pmap_pte_init_arm9();
824 		return 0;
825 	}
826 #endif /* CPU_ARM9 */
827 #ifdef CPU_SA110
828 	if (cputype == CPU_ID_SA110) {
829 		cpufuncs = sa110_cpufuncs;
830 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
831 		get_cachetype_table();
832 		pmap_pte_init_generic();
833 		return 0;
834 	}
835 #endif	/* CPU_SA110 */
836 #ifdef CPU_SA1100
837 	if (cputype == CPU_ID_SA1100) {
838 		cpufuncs = sa11x0_cpufuncs;
839 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
840 		get_cachetype_table();
841 		pmap_pte_init_generic();
842 		return 0;
843 	}
844 #endif	/* CPU_SA1100 */
845 #ifdef CPU_SA1110
846 	if (cputype == CPU_ID_SA1110) {
847 		cpufuncs = sa11x0_cpufuncs;
848 		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
849 		get_cachetype_table();
850 		pmap_pte_init_generic();
851 		return 0;
852 	}
853 #endif	/* CPU_SA1110 */
854 #ifdef CPU_XSCALE_80200
855 	if (cputype == CPU_ID_80200) {
856 		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
857 
858 		i80200_icu_init();
859 
860 		/*
861 		 * Reset the Performance Monitoring Unit to a
862 		 * pristine state:
863 		 *	- CCNT, PMN0, PMN1 reset to 0
864 		 *	- overflow indications cleared
865 		 *	- all counters disabled
866 		 */
867 		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
868 			:
869 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
870 			       PMNC_CC_IF));
871 
872 #if defined(XSCALE_CCLKCFG)
873 		/*
874 		 * Crank CCLKCFG to maximum legal value.
875 		 */
876 		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
877 			:
878 			: "r" (XSCALE_CCLKCFG));
879 #endif
880 
881 		/*
882 		 * XXX Disable ECC in the Bus Controller Unit; we
883 		 * don't really support it, yet.  Clear any pending
884 		 * error indications.
885 		 */
886 		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
887 			:
888 			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
889 
890 		cpufuncs = xscale_cpufuncs;
891 
892 		/*
893 		 * i80200 errata: Step-A0 and A1 have a bug where
894 		 * D$ dirty bits are not cleared on "invalidate by
895 		 * address".
896 		 *
897 		 * Workaround: Clean cache line before invalidating.
898 		 */
899 		if (rev == 0 || rev == 1)
900 			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
901 
902 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
903 		get_cachetype_cp15();
904 		pmap_pte_init_xscale();
905 		return 0;
906 	}
907 #endif /* CPU_XSCALE_80200 */
908 #ifdef CPU_XSCALE_80321
909 	if (cputype == CPU_ID_80321) {
910 		i80321_icu_init();
911 
912 		/*
913 		 * Reset the Performance Monitoring Unit to a
914 		 * pristine state:
915 		 *	- CCNT, PMN0, PMN1 reset to 0
916 		 *	- overflow indications cleared
917 		 *	- all counters disabled
918 		 */
919 		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
920 			:
921 			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
922 			       PMNC_CC_IF));
923 
924 		cpufuncs = xscale_cpufuncs;
925 
926 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
927 		get_cachetype_cp15();
928 		pmap_pte_init_xscale();
929 		return 0;
930 	}
931 #endif /* CPU_XSCALE_80321 */
932 #ifdef CPU_XSCALE_PXA2X0
933 	if (cputype == CPU_ID_PXA250 || cputype == CPU_ID_PXA210) {
934 		cpufuncs = xscale_cpufuncs;
935 
936 		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
937 		get_cachetype_cp15();
938 		pmap_pte_init_xscale();
939 		return 0;
940 	}
941 #endif /* CPU_XSCALE_PXA2X0 */
942 	/*
943 	 * Bzzzz. And the answer was ...
944 	 */
945 	panic("No support for this CPU type (%08x) in kernel", cputype);
946 	return(ARCHITECTURE_NOT_PRESENT);
947 }
948 
949 /*
950  * Fixup routines for data and prefetch aborts.
951  *
952  * Several compile time symbols are used
953  *
954  * DEBUG_FAULT_CORRECTION - Print debugging information during the
955  * correction of registers after a fault.
956  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
957  * when defined should use late aborts
958  */
959 
960 
961 /*
962  * Null abort fixup routine.
963  * For use when no fixup is required.
964  */
965 int
966 cpufunc_null_fixup(arg)
967 	void *arg;
968 {
969 	return(ABORT_FIXUP_OK);
970 }
971 
972 
973 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
974     defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
975 
976 #ifdef DEBUG_FAULT_CORRECTION
977 #define DFC_PRINTF(x)		printf x
978 #define DFC_DISASSEMBLE(x)	disassemble(x)
979 #else
980 #define DFC_PRINTF(x)		/* nothing */
981 #define DFC_DISASSEMBLE(x)	/* nothing */
982 #endif
983 
984 /*
985  * "Early" data abort fixup.
986  *
987  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
988  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
989  *
990  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
991  */
992 int
993 early_abort_fixup(arg)
994 	void *arg;
995 {
996 	trapframe_t *frame = arg;
997 	u_int fault_pc;
998 	u_int fault_instruction;
999 	int saved_lr = 0;
1000 
1001 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1002 
1003 		/* Ok an abort in SVC mode */
1004 
1005 		/*
1006 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1007 		 * as the fault happened in svc mode but we need it in the
1008 		 * usr slot so we can treat the registers as an array of ints
1009 		 * during fixing.
1010 		 * NOTE: This PC is in the position but writeback is not
1011 		 * allowed on r15.
1012 		 * Doing it like this is more efficient than trapping this
1013 		 * case in all possible locations in the following fixup code.
1014 		 */
1015 
1016 		saved_lr = frame->tf_usr_lr;
1017 		frame->tf_usr_lr = frame->tf_svc_lr;
1018 
1019 		/*
1020 		 * Note the trapframe does not have the SVC r13 so a fault
1021 		 * from an instruction with writeback to r13 in SVC mode is
1022 		 * not allowed. This should not happen as the kstack is
1023 		 * always valid.
1024 		 */
1025 	}
1026 
1027 	/* Get fault address and status from the CPU */
1028 
1029 	fault_pc = frame->tf_pc;
1030 	fault_instruction = *((volatile unsigned int *)fault_pc);
1031 
1032 	/* Decode the fault instruction and fix the registers as needed */
1033 
1034 	if ((fault_instruction & 0x0e000000) == 0x08000000) {
1035 		int base;
1036 		int loop;
1037 		int count;
1038 		int *registers = &frame->tf_r0;
1039 
1040 		DFC_PRINTF(("LDM/STM\n"));
1041 		DFC_DISASSEMBLE(fault_pc);
1042 		if (fault_instruction & (1 << 21)) {
1043 			DFC_PRINTF(("This instruction must be corrected\n"));
1044 			base = (fault_instruction >> 16) & 0x0f;
1045 			if (base == 15)
1046 				return ABORT_FIXUP_FAILED;
1047 			/* Count registers transferred */
1048 			count = 0;
1049 			for (loop = 0; loop < 16; ++loop) {
1050 				if (fault_instruction & (1<<loop))
1051 					++count;
1052 			}
1053 			DFC_PRINTF(("%d registers used\n", count));
1054 			DFC_PRINTF(("Corrected r%d by %d bytes ",
1055 				       base, count * 4));
1056 			if (fault_instruction & (1 << 23)) {
1057 				DFC_PRINTF(("down\n"));
1058 				registers[base] -= count * 4;
1059 			} else {
1060 				DFC_PRINTF(("up\n"));
1061 				registers[base] += count * 4;
1062 			}
1063 		}
1064 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1065 		int base;
1066 		int offset;
1067 		int *registers = &frame->tf_r0;
1068 
1069 		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1070 
1071 		DFC_DISASSEMBLE(fault_pc);
1072 
1073 		/* Only need to fix registers if write back is turned on */
1074 
1075 		if ((fault_instruction & (1 << 21)) != 0) {
1076 			base = (fault_instruction >> 16) & 0x0f;
1077 			if (base == 13 &&
1078 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1079 				return ABORT_FIXUP_FAILED;
1080 			if (base == 15)
1081 				return ABORT_FIXUP_FAILED;
1082 
1083 			offset = (fault_instruction & 0xff) << 2;
1084 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1085 			if ((fault_instruction & (1 << 23)) != 0)
1086 				offset = -offset;
1087 			registers[base] += offset;
1088 			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1089 		}
1090 	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1091 		return ABORT_FIXUP_FAILED;
1092 
1093 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1094 
1095 		/* Ok an abort in SVC mode */
1096 
1097 		/*
1098 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1099 		 * as the fault happened in svc mode but we need it in the
1100 		 * usr slot so we can treat the registers as an array of ints
1101 		 * during fixing.
1102 		 * NOTE: This PC is in the position but writeback is not
1103 		 * allowed on r15.
1104 		 * Doing it like this is more efficient than trapping this
1105 		 * case in all possible locations in the prior fixup code.
1106 		 */
1107 
1108 		frame->tf_svc_lr = frame->tf_usr_lr;
1109 		frame->tf_usr_lr = saved_lr;
1110 
1111 		/*
1112 		 * Note the trapframe does not have the SVC r13 so a fault
1113 		 * from an instruction with writeback to r13 in SVC mode is
1114 		 * not allowed. This should not happen as the kstack is
1115 		 * always valid.
1116 		 */
1117 	}
1118 
1119 	return(ABORT_FIXUP_OK);
1120 }
1121 #endif	/* CPU_ARM2/250/3/6/7 */
1122 
1123 
1124 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
1125 	defined(CPU_ARM7TDMI)
1126 /*
1127  * "Late" (base updated) data abort fixup
1128  *
1129  * For ARM6 (in late-abort mode) and ARM7.
1130  *
1131  * In this model, all data-transfer instructions need fixing up.  We defer
1132  * LDM, STM, LDC and STC fixup to the early-abort handler.
1133  */
int
late_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap (SWP/SWPB) instruction?  No base writeback to
	 * undo, but disassemble it when debugging. */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* It was a single data transfer (LDR/STR) instruction */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);

		/* This is for late abort only */

		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			/* Post-indexed (P clear) or writeback (W set):
			 * the base register was updated in the late-abort
			 * model and must be restored. */

			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Undo the register swap done on entry: copy the (possibly
		 * corrected) usr r14 back into the SVC r14 slot, and restore
		 * the usr r14 that was saved before the fixup ran.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
1303 #endif	/* CPU_ARM6(LATE)/7/7TDMI */
1304 
1305 /*
1306  * CPU Setup code
1307  */
1308 
1309 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
1310 	defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
1311 	defined(CPU_SA1100) || defined(CPU_SA1110) || \
1312 	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1313 	defined(CPU_XSCALE_PXA2X0)
1314 
/*
 * Operations parse_cpu_options() can apply to the control word:
 * ignore the option, OR the value in, or BIC (clear) it out.
 */
#define IGN	0
#define OR	1
#define BIC	2

/*
 * One boot-configuration option: co_name is looked up as a boolean
 * boot argument; co_trueop/co_falseop select which operation to apply
 * with co_value when the option is true/false respectively.
 */
struct cpu_option {
	char	*co_name;
	int	co_falseop;
	int	co_trueop;
	int	co_value;
};

static u_int parse_cpu_options __P((char *, struct cpu_option *, u_int));
1327 
1328 static u_int
1329 parse_cpu_options(args, optlist, cpuctrl)
1330 	char *args;
1331 	struct cpu_option *optlist;
1332 	u_int cpuctrl;
1333 {
1334 	int integer;
1335 
1336 	while (optlist->co_name) {
1337 		if (get_bootconf_option(args, optlist->co_name,
1338 		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1339 			if (integer) {
1340 				if (optlist->co_trueop == OR)
1341 					cpuctrl |= optlist->co_value;
1342 				else if (optlist->co_trueop == BIC)
1343 					cpuctrl &= ~optlist->co_value;
1344 			} else {
1345 				if (optlist->co_falseop == OR)
1346 					cpuctrl |= optlist->co_value;
1347 				else if (optlist->co_falseop == BIC)
1348 					cpuctrl &= ~optlist->co_value;
1349 			}
1350 		}
1351 		++optlist;
1352 	}
1353 	return(cpuctrl);
1354 }
1355 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
1356 
1357 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
1358 	|| defined(CPU_ARM8)
/*
 * Boot options shared by the ARM6, ARM7, ARM7TDMI and ARM8 setup
 * routines: enable/disable the combined I/D cache and write buffer.
 * The bare "nocache"/"nowritebuf" spellings are COMPAT_12 forms.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1370 
1371 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1372 
1373 #ifdef CPU_ARM6
/*
 * ARM6-specific boot options: combined I/D cache and write buffer.
 */
struct cpu_option arm6_options[] = {
	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1381 
1382 void
1383 arm6_setup(args)
1384 	char *args;
1385 {
1386 	int cpuctrl, cpuctrlmask;
1387 
1388 	/* Set up default control registers bits */
1389 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1390 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1391 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1392 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1393 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1394 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1395 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1396 		 | CPU_CONTROL_AFLT_ENABLE;
1397 
1398 #ifdef ARM6_LATE_ABORT
1399 	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
1400 #endif	/* ARM6_LATE_ABORT */
1401 
1402 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1403 	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
1404 
1405 	/* Clear out the cache */
1406 	cpu_idcache_wbinv_all();
1407 
1408 	/* Set the control register */
1409 	curcpu()->ci_ctrl = cpuctrl;
1410 	cpu_control(0xffffffff, cpuctrl);
1411 }
1412 #endif	/* CPU_ARM6 */
1413 
1414 #ifdef CPU_ARM7
/*
 * ARM7-specific boot options: combined I/D cache, write buffer, and
 * the FPA coprocessor clock divider ("fpaclk2" is the COMPAT_12 form).
 */
struct cpu_option arm7_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1426 
1427 void
1428 arm7_setup(args)
1429 	char *args;
1430 {
1431 	int cpuctrl, cpuctrlmask;
1432 
1433 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1434 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1435 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1436 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1437 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1438 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1439 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
1440 		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
1441 		 | CPU_CONTROL_AFLT_ENABLE;
1442 
1443 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1444 	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
1445 
1446 	/* Clear out the cache */
1447 	cpu_idcache_wbinv_all();
1448 
1449 	/* Set the control register */
1450 	curcpu()->ci_ctrl = cpuctrl;
1451 	cpu_control(0xffffffff, cpuctrl);
1452 }
1453 #endif	/* CPU_ARM7 */
1454 
1455 #ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI-specific boot options.  Note these deliberately reuse the
 * "arm7.*" option names.
 */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1467 
1468 void
1469 arm7tdmi_setup(args)
1470 	char *args;
1471 {
1472 	int cpuctrl;
1473 
1474 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1475 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1476 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1477 
1478 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1479 	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1480 
1481 	/* Clear out the cache */
1482 	cpu_idcache_wbinv_all();
1483 
1484 	/* Set the control register */
1485 	curcpu()->ci_ctrl = cpuctrl;
1486 	cpu_control(0xffffffff, cpuctrl);
1487 }
1488 #endif	/* CPU_ARM7TDMI */
1489 
1490 #ifdef CPU_ARM8
/*
 * ARM8-specific boot options: combined I/D cache, write buffer and
 * branch prediction ("branchpredict" is the COMPAT_12 form).
 */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1503 
1504 void
1505 arm8_setup(args)
1506 	char *args;
1507 {
1508 	int integer;
1509 	int cpuctrl, cpuctrlmask;
1510 	int clocktest;
1511 	int setclock = 0;
1512 
1513 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1514 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1515 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1516 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1517 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1518 		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1519 		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1520 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1521 
1522 	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1523 	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1524 
1525 	/* Get clock configuration */
1526 	clocktest = arm8_clock_config(0, 0) & 0x0f;
1527 
1528 	/* Special ARM8 clock and test configuration */
1529 	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1530 		clocktest = 0;
1531 		setclock = 1;
1532 	}
1533 	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1534 		if (integer)
1535 			clocktest |= 0x01;
1536 		else
1537 			clocktest &= ~(0x01);
1538 		setclock = 1;
1539 	}
1540 	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1541 		if (integer)
1542 			clocktest |= 0x02;
1543 		else
1544 			clocktest &= ~(0x02);
1545 		setclock = 1;
1546 	}
1547 	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1548 		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1549 		setclock = 1;
1550 	}
1551 	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1552 		clocktest |= (integer & 7) << 5;
1553 		setclock = 1;
1554 	}
1555 
1556 	/* Clear out the cache */
1557 	cpu_idcache_wbinv_all();
1558 
1559 	/* Set the control register */
1560 	curcpu()->ci_ctrl = cpuctrl;
1561 	cpu_control(0xffffffff, cpuctrl);
1562 
1563 	/* Set the clock/test register */
1564 	if (setclock)
1565 		arm8_clock_config(0x7f, clocktest);
1566 }
1567 #endif	/* CPU_ARM8 */
1568 
1569 #ifdef CPU_ARM9
/*
 * ARM9-specific boot options: split I/D caches (separately or
 * together) and the write buffer.
 */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1581 
1582 void
1583 arm9_setup(args)
1584 	char *args;
1585 {
1586 	int cpuctrl, cpuctrlmask;
1587 
1588 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1589 	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1590 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1591 	    | CPU_CONTROL_WBUF_ENABLE;
1592 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1593 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1594 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1595 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1596 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1597 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1598 		 | CPU_CONTROL_CPCLK;
1599 
1600 	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1601 
1602 	/* Clear out the cache */
1603 	cpu_idcache_wbinv_all();
1604 
1605 	/* Set the control register */
1606 	curcpu()->ci_ctrl = cpuctrl;
1607 	cpu_control(0xffffffff, cpuctrl);
1608 
1609 }
1610 #endif	/* CPU_ARM9 */
1611 
1612 #ifdef CPU_SA110
/*
 * SA-110 boot options: split I/D caches (separately or together) and
 * the write buffer.  The bare forms are COMPAT_12 spellings.
 */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1628 
/*
 * SA-110 CPU setup: build the default control-register value, apply
 * boot-time overrides from the SA-110 option table, clear the caches,
 * load the control register, and enable CPU clock switching.
 */
void
sa110_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/* Defaults: MMU, 32-bit program/data space, system protection,
	 * split I/D caches and write buffer all enabled. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE;
	/* All the control bits we know about (currently unused, since
	 * the whole register is written below). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

	/* Apply boot argument overrides. */
	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/*
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm ("mcr 15, 0, r0, c15, c1, 2");
}
1663 #endif	/* CPU_SA110 */
1664 
1665 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * SA-1100/SA-1110 boot options: split I/D caches (separately or
 * together) and the write buffer.  The bare forms are COMPAT_12
 * spellings.
 */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1681 
1682 void
1683 sa11x0_setup(args)
1684 	char *args;
1685 {
1686 	int cpuctrl, cpuctrlmask;
1687 
1688 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1689 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1690 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1691 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
1692 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1693 		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1694 		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1695 		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1696 		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1697 		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1698 		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1699 
1700 	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
1701 
1702 	/* Clear out the cache */
1703 	cpu_idcache_wbinv_all();
1704 
1705 	/* Set the control register */
1706 	cpu_control(0xffffffff, cpuctrl);
1707 }
1708 #endif	/* CPU_SA1100 || CPU_SA1110 */
1709 
1710 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1711     defined(CPU_XSCALE_PXA2X0)
/*
 * XScale boot options: branch prediction and the split I/D caches
 * (separately or together).  The bare forms are COMPAT_12 spellings.
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1726 
/*
 * Intel XScale (80200/80321/PXA2x0) CPU setup: build the default
 * control-register value, apply boot-time overrides from the XScale
 * option table, clear the caches, and load the control register.
 */
void
xscale_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
	/* All the control bits we know about (currently unused, since
	 * the whole register is written below). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

	/* Apply boot argument overrides. */
	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	curcpu()->ci_ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

#if 0
	/*
	 * XXX FIXME
	 * Disable write buffer coalescing, PT ECC, and set
	 * the mini-cache to write-back/read-allocate.
	 */
	__asm ("mcr p15, 0, %0, c1, c0, 1" :: "r" (0));
#endif
}
1773 #endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 */
1774