1 /* $NetBSD: cpufunc.c,v 1.185 2022/12/22 06:58:07 ryo Exp $ */
2
3 /*
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * arm11 support code Copyright (c) 2007 Microsoft
9 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
10 * cortexa8 improvements Copyright (c) Goeran Weinholt
11 * Copyright (c) 1997 Mark Brinicombe.
12 * Copyright (c) 1997 Causality Limited
13 * All rights reserved.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 * must display the following acknowledgement:
25 * This product includes software developed by Causality Limited.
26 * 4. The name of Causality Limited may not be used to endorse or promote
27 * products derived from this software without specific prior written
28 * permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
31 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
32 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
33 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
34 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
35 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
36 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * SUCH DAMAGE.
41 *
42 * RiscBSD kernel project
43 *
44 * cpufuncs.c
45 *
46 * C functions for supporting CPU / MMU / TLB specific operations.
47 *
48 * Created : 30/01/97
49 */
50
51 #include <sys/cdefs.h>
52 __KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.185 2022/12/22 06:58:07 ryo Exp $");
53
54 #include "opt_arm_start.h"
55 #include "opt_compat_netbsd.h"
56 #include "opt_cpuoptions.h"
57 #include "opt_cputypes.h"
58 #include "opt_multiprocessor.h"
59
60 #include <sys/types.h>
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <machine/cpu.h>
64 #include <machine/bootconfig.h>
65 #include <arch/arm/arm/disassem.h>
66
67 #include <uvm/uvm.h>
68
69 #include <arm/cpufunc_proto.h>
70 #include <arm/cpuconf.h>
71 #include <arm/locore.h>
72
73 #ifdef CPU_XSCALE_80200
74 #include <arm/xscale/i80200reg.h>
75 #include <arm/xscale/i80200var.h>
76 #endif
77
78 #ifdef CPU_XSCALE_80321
79 #include <arm/xscale/i80321reg.h>
80 #include <arm/xscale/i80321var.h>
81 #endif
82
83 #ifdef CPU_XSCALE_IXP425
84 #include <arm/xscale/ixp425reg.h>
85 #include <arm/xscale/ixp425var.h>
86 #endif
87
88 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
89 #include <arm/xscale/xscalereg.h>
90 #endif
91
92 #if defined(CPU_PJ4B)
93 #include "opt_mvsoc.h"
94 #include <machine/bus_defs.h>
95 #if defined(ARMADAXP)
96 #include <arm/marvell/armadaxpreg.h>
97 #include <arm/marvell/armadaxpvar.h>
98 #endif
99 #endif
100
101 #if defined(CPU_ARMV7) && (defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6))
102 bool cpu_armv7_p;
103 #endif
104
105 #if defined(CPU_ARMV6) && (defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6))
106 bool cpu_armv6_p;
107 #endif
108
109
110 /* PRIMARY CACHE VARIABLES */
111 #if (ARM_MMU_V6 + ARM_MMU_V7) != 0
112 u_int arm_cache_prefer_mask;
113 #endif
114 struct arm_cache_info arm_pcache;
115 struct arm_cache_info arm_scache;
116
117 u_int arm_dcache_align;
118 u_int arm_dcache_align_mask;
119
120 // Define a TTB value that can never be used.
121 uint32_t cpu_ttb = ~0;
122
123 /* 1 == use cpu_sleep(), 0 == don't */
124 int cpu_do_powersave;
125
126 #ifdef CPU_ARM6
127 struct cpu_functions arm6_cpufuncs = {
128 /* CPU functions */
129
130 .cf_id = cpufunc_id,
131 .cf_cpwait = cpufunc_nullop,
132
133 /* MMU functions */
134
135 .cf_control = cpufunc_control,
136 .cf_domains = cpufunc_domains,
137 .cf_setttb = arm67_setttb,
138 .cf_faultstatus = cpufunc_faultstatus,
139 .cf_faultaddress = cpufunc_faultaddress,
140
141 /* TLB functions */
142
143 .cf_tlb_flushID = arm67_tlb_flush,
144 .cf_tlb_flushID_SE = arm67_tlb_purge,
145 .cf_tlb_flushI = arm67_tlb_flush,
146 .cf_tlb_flushI_SE = arm67_tlb_purge,
147 .cf_tlb_flushD = arm67_tlb_flush,
148 .cf_tlb_flushD_SE = arm67_tlb_purge,
149
150 /* Cache operations */
151
152 .cf_icache_sync_all = cpufunc_nullop,
153 .cf_icache_sync_range = (void *) cpufunc_nullop,
154
155 .cf_dcache_wbinv_all = arm67_cache_flush,
156 .cf_dcache_wbinv_range = (void *)arm67_cache_flush,
157 .cf_dcache_inv_range = (void *)arm67_cache_flush,
158 .cf_dcache_wb_range = (void *)cpufunc_nullop,
159
160 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
161 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
162 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
163
164 .cf_idcache_wbinv_all = arm67_cache_flush,
165 .cf_idcache_wbinv_range = (void *)arm67_cache_flush,
166
167 /* Other functions */
168
169 .cf_flush_prefetchbuf = cpufunc_nullop,
170 .cf_drain_writebuf = cpufunc_nullop,
171 .cf_flush_brnchtgt_C = cpufunc_nullop,
172 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
173
174 .cf_sleep = (void *)cpufunc_nullop,
175
176 /* Soft functions */
177
178 #ifdef ARM6_LATE_ABORT
179 .cf_dataabt_fixup = late_abort_fixup,
180 #else
181 .cf_dataabt_fixup = early_abort_fixup,
182 #endif
183 .cf_prefetchabt_fixup = cpufunc_null_fixup,
184
185 .cf_context_switch = arm67_context_switch,
186
187 .cf_setup = arm6_setup
188
189 };
190 #endif /* CPU_ARM6 */
191
192 #ifdef CPU_ARM7
193 struct cpu_functions arm7_cpufuncs = {
194 /* CPU functions */
195
196 .cf_id = cpufunc_id,
197 .cf_cpwait = cpufunc_nullop,
198
199 /* MMU functions */
200
201 .cf_control = cpufunc_control,
202 .cf_domains = cpufunc_domains,
203 .cf_setttb = arm67_setttb,
204 .cf_faultstatus = cpufunc_faultstatus,
205 .cf_faultaddress = cpufunc_faultaddress,
206
207 /* TLB functions */
208
209 .cf_tlb_flushID = arm67_tlb_flush,
210 .cf_tlb_flushID_SE = arm67_tlb_purge,
211 .cf_tlb_flushI = arm67_tlb_flush,
212 .cf_tlb_flushI_SE = arm67_tlb_purge,
213 .cf_tlb_flushD = arm67_tlb_flush,
214 .cf_tlb_flushD_SE = arm67_tlb_purge,
215
216 /* Cache operations */
217
218 .cf_icache_sync_all = cpufunc_nullop,
219 .cf_icache_sync_range = (void *)cpufunc_nullop,
220
221 .cf_dcache_wbinv_all = arm67_cache_flush,
222 .cf_dcache_wbinv_range = (void *)arm67_cache_flush,
223 .cf_dcache_inv_range = (void *)arm67_cache_flush,
224 .cf_dcache_wb_range = (void *)cpufunc_nullop,
225
226 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
227 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
228 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
229
230 .cf_idcache_wbinv_all = arm67_cache_flush,
231 .cf_idcache_wbinv_range = (void *)arm67_cache_flush,
232
233 /* Other functions */
234
235 .cf_flush_prefetchbuf = cpufunc_nullop,
236 .cf_drain_writebuf = cpufunc_nullop,
237 .cf_flush_brnchtgt_C = cpufunc_nullop,
238 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
239
240 .cf_sleep = (void *)cpufunc_nullop,
241
242 /* Soft functions */
243
244 .cf_dataabt_fixup = late_abort_fixup,
245 .cf_prefetchabt_fixup = cpufunc_null_fixup,
246
247 .cf_context_switch = arm67_context_switch,
248
249 .cf_setup = arm7_setup
250
251 };
252 #endif /* CPU_ARM7 */
253
254 #ifdef CPU_ARM7TDMI
255 struct cpu_functions arm7tdmi_cpufuncs = {
256 /* CPU functions */
257
258 .cf_id = cpufunc_id,
259 .cf_cpwait = cpufunc_nullop,
260
261 /* MMU functions */
262
263 .cf_control = cpufunc_control,
264 .cf_domains = cpufunc_domains,
265 .cf_setttb = arm7tdmi_setttb,
266 .cf_faultstatus = cpufunc_faultstatus,
267 .cf_faultaddress = cpufunc_faultaddress,
268
269 /* TLB functions */
270
271 .cf_tlb_flushID = arm7tdmi_tlb_flushID,
272 .cf_tlb_flushID_SE = arm7tdmi_tlb_flushID_SE,
273 .cf_tlb_flushI = arm7tdmi_tlb_flushID,
274 .cf_tlb_flushI_SE = arm7tdmi_tlb_flushID_SE,
275 .cf_tlb_flushD = arm7tdmi_tlb_flushID,
276 .cf_tlb_flushD_SE = arm7tdmi_tlb_flushID_SE,
277
278 /* Cache operations */
279
280 .cf_icache_sync_all = cpufunc_nullop,
281 .cf_icache_sync_range = (void *)cpufunc_nullop,
282
283 .cf_dcache_wbinv_all = arm7tdmi_cache_flushID,
284 .cf_dcache_wbinv_range = (void *)arm7tdmi_cache_flushID,
285 .cf_dcache_inv_range = (void *)arm7tdmi_cache_flushID,
286 .cf_dcache_wb_range = (void *)cpufunc_nullop,
287
288 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
289 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
290 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
291
292 .cf_idcache_wbinv_all = arm7tdmi_cache_flushID,
293 .cf_idcache_wbinv_range = (void *)arm7tdmi_cache_flushID,
294
295 /* Other functions */
296
297 .cf_flush_prefetchbuf = cpufunc_nullop,
298 .cf_drain_writebuf = cpufunc_nullop,
299 .cf_flush_brnchtgt_C = cpufunc_nullop,
300 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
301
302 .cf_sleep = (void *)cpufunc_nullop,
303
304 /* Soft functions */
305
306 .cf_dataabt_fixup = late_abort_fixup,
307 .cf_prefetchabt_fixup = cpufunc_null_fixup,
308
309 .cf_context_switch = arm7tdmi_context_switch,
310
311 .cf_setup = arm7tdmi_setup
312
313 };
314 #endif /* CPU_ARM7TDMI */
315
316 #ifdef CPU_ARM8
317 struct cpu_functions arm8_cpufuncs = {
318 /* CPU functions */
319
320 .cf_id = cpufunc_id,
321 .cf_cpwait = cpufunc_nullop,
322
323 /* MMU functions */
324
325 .cf_control = cpufunc_control,
326 .cf_domains = cpufunc_domains,
327 .cf_setttb = arm8_setttb,
328 .cf_faultstatus = cpufunc_faultstatus,
329 .cf_faultaddress = cpufunc_faultaddress,
330
331 /* TLB functions */
332
333 .cf_tlb_flushID = arm8_tlb_flushID,
334 .cf_tlb_flushID_SE = arm8_tlb_flushID_SE,
335 .cf_tlb_flushI = arm8_tlb_flushID,
336 .cf_tlb_flushI_SE = arm8_tlb_flushID_SE,
337 .cf_tlb_flushD = arm8_tlb_flushID,
338 .cf_tlb_flushD_SE = arm8_tlb_flushID_SE,
339
340 /* Cache operations */
341
342 .cf_icache_sync_all = cpufunc_nullop,
343 .cf_icache_sync_range = (void *)cpufunc_nullop,
344
345 .cf_dcache_wbinv_all = arm8_cache_purgeID,
346 .cf_dcache_wbinv_range = (void *)arm8_cache_purgeID,
347 /*XXX*/ .cf_dcache_inv_range = (void *)arm8_cache_purgeID,
348 .cf_dcache_wb_range = (void *)arm8_cache_cleanID,
349
350 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
351 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
352 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
353
354 .cf_idcache_wbinv_all = arm8_cache_purgeID,
355 .cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,
356
357 /* Other functions */
358
359 .cf_flush_prefetchbuf = cpufunc_nullop,
360 .cf_drain_writebuf = cpufunc_nullop,
361 .cf_flush_brnchtgt_C = cpufunc_nullop,
362 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
363
364 .cf_sleep = (void *)cpufunc_nullop,
365
366 /* Soft functions */
367
368 .cf_dataabt_fixup = cpufunc_null_fixup,
369 .cf_prefetchabt_fixup = cpufunc_null_fixup,
370
371 .cf_context_switch = arm8_context_switch,
372
373 .cf_setup = arm8_setup
374 };
375 #endif /* CPU_ARM8 */
376
377 #ifdef CPU_ARM9
378 struct cpu_functions arm9_cpufuncs = {
379 /* CPU functions */
380
381 .cf_id = cpufunc_id,
382 .cf_cpwait = cpufunc_nullop,
383
384 /* MMU functions */
385
386 .cf_control = cpufunc_control,
387 .cf_domains = cpufunc_domains,
388 .cf_setttb = arm9_setttb,
389 .cf_faultstatus = cpufunc_faultstatus,
390 .cf_faultaddress = cpufunc_faultaddress,
391
392 /* TLB functions */
393
394 .cf_tlb_flushID = armv4_tlb_flushID,
395 .cf_tlb_flushID_SE = arm9_tlb_flushID_SE,
396 .cf_tlb_flushI = armv4_tlb_flushI,
397 .cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
398 .cf_tlb_flushD = armv4_tlb_flushD,
399 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE,
400
401 /* Cache operations */
402
403 .cf_icache_sync_all = arm9_icache_sync_all,
404 .cf_icache_sync_range = arm9_icache_sync_range,
405
406 .cf_dcache_wbinv_all = arm9_dcache_wbinv_all,
407 .cf_dcache_wbinv_range = arm9_dcache_wbinv_range,
408 /*XXX*/ .cf_dcache_inv_range = arm9_dcache_wbinv_range,
409 .cf_dcache_wb_range = arm9_dcache_wb_range,
410
411 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
412 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
413 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
414
415 .cf_idcache_wbinv_all = arm9_idcache_wbinv_all,
416 .cf_idcache_wbinv_range = arm9_idcache_wbinv_range,
417
418 /* Other functions */
419
420 .cf_flush_prefetchbuf = cpufunc_nullop,
421 .cf_drain_writebuf = armv4_drain_writebuf,
422 .cf_flush_brnchtgt_C = cpufunc_nullop,
423 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
424
425 .cf_sleep = (void *)cpufunc_nullop,
426
427 /* Soft functions */
428
429 .cf_dataabt_fixup = cpufunc_null_fixup,
430 .cf_prefetchabt_fixup = cpufunc_null_fixup,
431
432 .cf_context_switch = arm9_context_switch,
433
434 .cf_setup = arm9_setup
435
436 };
437 #endif /* CPU_ARM9 */
438
439 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
440 struct cpu_functions armv5_ec_cpufuncs = {
441 /* CPU functions */
442
443 .cf_id = cpufunc_id,
444 .cf_cpwait = cpufunc_nullop,
445
446 /* MMU functions */
447
448 .cf_control = cpufunc_control,
449 .cf_domains = cpufunc_domains,
450 .cf_setttb = armv5_ec_setttb,
451 .cf_faultstatus = cpufunc_faultstatus,
452 .cf_faultaddress = cpufunc_faultaddress,
453
454 /* TLB functions */
455
456 .cf_tlb_flushID = armv4_tlb_flushID,
457 .cf_tlb_flushID_SE = arm10_tlb_flushID_SE,
458 .cf_tlb_flushI = armv4_tlb_flushI,
459 .cf_tlb_flushI_SE = arm10_tlb_flushI_SE,
460 .cf_tlb_flushD = armv4_tlb_flushD,
461 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE,
462
463 /* Cache operations */
464
465 .cf_icache_sync_all = armv5_ec_icache_sync_all,
466 .cf_icache_sync_range = armv5_ec_icache_sync_range,
467
468 .cf_dcache_wbinv_all = armv5_ec_dcache_wbinv_all,
469 .cf_dcache_wbinv_range = armv5_ec_dcache_wbinv_range,
470 /*XXX*/ .cf_dcache_inv_range = armv5_ec_dcache_wbinv_range,
471 .cf_dcache_wb_range = armv5_ec_dcache_wb_range,
472
473 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
474 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
475 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
476
477 .cf_idcache_wbinv_all = armv5_ec_idcache_wbinv_all,
478 .cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,
479
480 /* Other functions */
481
482 .cf_flush_prefetchbuf = cpufunc_nullop,
483 .cf_drain_writebuf = armv4_drain_writebuf,
484 .cf_flush_brnchtgt_C = cpufunc_nullop,
485 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
486
487 .cf_sleep = (void *)cpufunc_nullop,
488
489 /* Soft functions */
490
491 .cf_dataabt_fixup = cpufunc_null_fixup,
492 .cf_prefetchabt_fixup = cpufunc_null_fixup,
493
494 .cf_context_switch = arm10_context_switch,
495
496 .cf_setup = arm10_setup
497
498 };
499 #endif /* CPU_ARM9E || CPU_ARM10 */
500
501 #ifdef CPU_ARM10
502 struct cpu_functions arm10_cpufuncs = {
503 /* CPU functions */
504
505 .cf_id = cpufunc_id,
506 .cf_cpwait = cpufunc_nullop,
507
508 /* MMU functions */
509
510 .cf_control = cpufunc_control,
511 .cf_domains = cpufunc_domains,
512 .cf_setttb = armv5_setttb,
513 .cf_faultstatus = cpufunc_faultstatus,
514 .cf_faultaddress = cpufunc_faultaddress,
515
516 /* TLB functions */
517
518 .cf_tlb_flushID = armv4_tlb_flushID,
519 .cf_tlb_flushID_SE = arm10_tlb_flushID_SE,
520 .cf_tlb_flushI = armv4_tlb_flushI,
521 .cf_tlb_flushI_SE = arm10_tlb_flushI_SE,
522 .cf_tlb_flushD = armv4_tlb_flushD,
523 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE,
524
525 /* Cache operations */
526
527 .cf_icache_sync_all = armv5_icache_sync_all,
528 .cf_icache_sync_range = armv5_icache_sync_range,
529
530 .cf_dcache_wbinv_all = armv5_dcache_wbinv_all,
531 .cf_dcache_wbinv_range = armv5_dcache_wbinv_range,
532 /*XXX*/ .cf_dcache_inv_range = armv5_dcache_wbinv_range,
533 .cf_dcache_wb_range = armv5_dcache_wb_range,
534
535 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
536 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
537 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
538
539 .cf_idcache_wbinv_all = armv5_idcache_wbinv_all,
540 .cf_idcache_wbinv_range = armv5_idcache_wbinv_range,
541
542 /* Other functions */
543
544 .cf_flush_prefetchbuf = cpufunc_nullop,
545 .cf_drain_writebuf = armv4_drain_writebuf,
546 .cf_flush_brnchtgt_C = cpufunc_nullop,
547 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
548
549 .cf_sleep = (void *)cpufunc_nullop,
550
551 /* Soft functions */
552
553 .cf_dataabt_fixup = cpufunc_null_fixup,
554 .cf_prefetchabt_fixup = cpufunc_null_fixup,
555
556 .cf_context_switch = arm10_context_switch,
557
558 .cf_setup = arm10_setup
559
560 };
561 #endif /* CPU_ARM10 */
562
563 #ifdef CPU_ARM11
564 struct cpu_functions arm11_cpufuncs = {
565 /* CPU functions */
566
567 .cf_id = cpufunc_id,
568 .cf_cpwait = cpufunc_nullop,
569
570 /* MMU functions */
571
572 .cf_control = cpufunc_control,
573 .cf_domains = cpufunc_domains,
574 .cf_setttb = arm11_setttb,
575 .cf_faultstatus = cpufunc_faultstatus,
576 .cf_faultaddress = cpufunc_faultaddress,
577
578 /* TLB functions */
579
580 .cf_tlb_flushID = arm11_tlb_flushID,
581 .cf_tlb_flushID_SE = arm11_tlb_flushID_SE,
582 .cf_tlb_flushI = arm11_tlb_flushI,
583 .cf_tlb_flushI_SE = arm11_tlb_flushI_SE,
584 .cf_tlb_flushD = arm11_tlb_flushD,
585 .cf_tlb_flushD_SE = arm11_tlb_flushD_SE,
586
587 /* Cache operations */
588
589 .cf_icache_sync_all = armv6_icache_sync_all,
590 .cf_icache_sync_range = armv6_icache_sync_range,
591
592 .cf_dcache_wbinv_all = armv6_dcache_wbinv_all,
593 .cf_dcache_wbinv_range = armv6_dcache_wbinv_range,
594 .cf_dcache_inv_range = armv6_dcache_inv_range,
595 .cf_dcache_wb_range = armv6_dcache_wb_range,
596
597 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
598 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
599 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
600
601 .cf_idcache_wbinv_all = armv6_idcache_wbinv_all,
602 .cf_idcache_wbinv_range = armv6_idcache_wbinv_range,
603
604 /* Other functions */
605
606 .cf_flush_prefetchbuf = cpufunc_nullop,
607 .cf_drain_writebuf = arm11_drain_writebuf,
608 .cf_flush_brnchtgt_C = cpufunc_nullop,
609 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
610
611 .cf_sleep = arm11_sleep,
612
613 /* Soft functions */
614
615 .cf_dataabt_fixup = cpufunc_null_fixup,
616 .cf_prefetchabt_fixup = cpufunc_null_fixup,
617
618 .cf_context_switch = arm11_context_switch,
619
620 .cf_setup = arm11_setup
621
622 };
623 #endif /* CPU_ARM11 */
624
625 #ifdef CPU_ARM1136
626 struct cpu_functions arm1136_cpufuncs = {
627 /* CPU functions */
628
629 .cf_id = cpufunc_id,
630 .cf_cpwait = cpufunc_nullop,
631
632 /* MMU functions */
633
634 .cf_control = cpufunc_control,
635 .cf_domains = cpufunc_domains,
636 .cf_setttb = arm11_setttb,
637 .cf_faultstatus = cpufunc_faultstatus,
638 .cf_faultaddress = cpufunc_faultaddress,
639
640 /* TLB functions */
641
642 .cf_tlb_flushID = arm11_tlb_flushID,
643 .cf_tlb_flushID_SE = arm11_tlb_flushID_SE,
644 .cf_tlb_flushI = arm11_tlb_flushI,
645 .cf_tlb_flushI_SE = arm11_tlb_flushI_SE,
646 .cf_tlb_flushD = arm11_tlb_flushD,
647 .cf_tlb_flushD_SE = arm11_tlb_flushD_SE,
648
649 /* Cache operations */
650
651 .cf_icache_sync_all = arm11x6_icache_sync_all, /* 411920 */
652 .cf_icache_sync_range = arm11x6_icache_sync_range, /* 371025 */
653
654 .cf_dcache_wbinv_all = arm11x6_dcache_wbinv_all, /* 411920 */
655 .cf_dcache_wbinv_range = armv6_dcache_wbinv_range,
656 .cf_dcache_inv_range = armv6_dcache_inv_range,
657 .cf_dcache_wb_range = armv6_dcache_wb_range,
658
659 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
660 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
661 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
662
663 .cf_idcache_wbinv_all = arm11x6_idcache_wbinv_all, /* 411920 */
664 .cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range, /* 371025 */
665
666 /* Other functions */
667
668 .cf_flush_prefetchbuf = arm11x6_flush_prefetchbuf,
669 .cf_drain_writebuf = arm11_drain_writebuf,
670 .cf_flush_brnchtgt_C = cpufunc_nullop,
671 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
672
673 .cf_sleep = arm11_sleep, /* arm1136_sleep_rev0 */
674
675 /* Soft functions */
676
677 .cf_dataabt_fixup = cpufunc_null_fixup,
678 .cf_prefetchabt_fixup = cpufunc_null_fixup,
679
680 .cf_context_switch = arm11_context_switch,
681
682 .cf_setup = arm11x6_setup
683
684 };
685 #endif /* CPU_ARM1136 */
686
687 #ifdef CPU_ARM1176
688 struct cpu_functions arm1176_cpufuncs = {
689 /* CPU functions */
690
691 .cf_id = cpufunc_id,
692 .cf_cpwait = cpufunc_nullop,
693
694 /* MMU functions */
695
696 .cf_control = cpufunc_control,
697 .cf_domains = cpufunc_domains,
698 .cf_setttb = arm11_setttb,
699 .cf_faultstatus = cpufunc_faultstatus,
700 .cf_faultaddress = cpufunc_faultaddress,
701
702 /* TLB functions */
703
704 .cf_tlb_flushID = arm11_tlb_flushID,
705 .cf_tlb_flushID_SE = arm11_tlb_flushID_SE,
706 .cf_tlb_flushI = arm11_tlb_flushI,
707 .cf_tlb_flushI_SE = arm11_tlb_flushI_SE,
708 .cf_tlb_flushD = arm11_tlb_flushD,
709 .cf_tlb_flushD_SE = arm11_tlb_flushD_SE,
710
711 /* Cache operations */
712
713 .cf_icache_sync_all = arm11x6_icache_sync_all, /* 415045 */
714 .cf_icache_sync_range = arm11x6_icache_sync_range, /* 371367 */
715
716 .cf_dcache_wbinv_all = arm11x6_dcache_wbinv_all, /* 415045 */
717 .cf_dcache_wbinv_range = armv6_dcache_wbinv_range,
718 .cf_dcache_inv_range = armv6_dcache_inv_range,
719 .cf_dcache_wb_range = armv6_dcache_wb_range,
720
721 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
722 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
723 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
724
725 .cf_idcache_wbinv_all = arm11x6_idcache_wbinv_all, /* 415045 */
726 .cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range, /* 371367 */
727
728 /* Other functions */
729
730 .cf_flush_prefetchbuf = arm11x6_flush_prefetchbuf,
731 .cf_drain_writebuf = arm11_drain_writebuf,
732 .cf_flush_brnchtgt_C = cpufunc_nullop,
733 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
734
735 .cf_sleep = arm11x6_sleep, /* no ref. */
736
737 /* Soft functions */
738
739 .cf_dataabt_fixup = cpufunc_null_fixup,
740 .cf_prefetchabt_fixup = cpufunc_null_fixup,
741
742 .cf_context_switch = arm11_context_switch,
743
744 .cf_setup = arm11x6_setup
745
746 };
747 #endif /* CPU_ARM1176 */
748
749
750 #ifdef CPU_ARM11MPCORE
751 struct cpu_functions arm11mpcore_cpufuncs = {
752 /* CPU functions */
753
754 .cf_id = cpufunc_id,
755 .cf_cpwait = cpufunc_nullop,
756
757 /* MMU functions */
758
759 .cf_control = cpufunc_control,
760 .cf_domains = cpufunc_domains,
761 .cf_setttb = arm11_setttb,
762 .cf_faultstatus = cpufunc_faultstatus,
763 .cf_faultaddress = cpufunc_faultaddress,
764
765 /* TLB functions */
766
767 .cf_tlb_flushID = arm11_tlb_flushID,
768 .cf_tlb_flushID_SE = arm11_tlb_flushID_SE,
769 .cf_tlb_flushI = arm11_tlb_flushI,
770 .cf_tlb_flushI_SE = arm11_tlb_flushI_SE,
771 .cf_tlb_flushD = arm11_tlb_flushD,
772 .cf_tlb_flushD_SE = arm11_tlb_flushD_SE,
773
774 /* Cache operations */
775
776 .cf_icache_sync_all = armv6_icache_sync_all,
777 .cf_icache_sync_range = armv5_icache_sync_range,
778
779 .cf_dcache_wbinv_all = armv6_dcache_wbinv_all,
780 .cf_dcache_wbinv_range = armv5_dcache_wbinv_range,
781 .cf_dcache_inv_range = armv5_dcache_inv_range,
782 .cf_dcache_wb_range = armv5_dcache_wb_range,
783
784 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
785 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
786 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
787
788 .cf_idcache_wbinv_all = armv6_idcache_wbinv_all,
789 .cf_idcache_wbinv_range = armv5_idcache_wbinv_range,
790
791 /* Other functions */
792
793 .cf_flush_prefetchbuf = cpufunc_nullop,
794 .cf_drain_writebuf = arm11_drain_writebuf,
795 .cf_flush_brnchtgt_C = cpufunc_nullop,
796 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
797
798 .cf_sleep = arm11_sleep,
799
800 /* Soft functions */
801
802 .cf_dataabt_fixup = cpufunc_null_fixup,
803 .cf_prefetchabt_fixup = cpufunc_null_fixup,
804
805 .cf_context_switch = arm11_context_switch,
806
807 .cf_setup = arm11mpcore_setup
808
809 };
810 #endif /* CPU_ARM11MPCORE */
811
812 #ifdef CPU_SA110
813 struct cpu_functions sa110_cpufuncs = {
814 /* CPU functions */
815
816 .cf_id = cpufunc_id,
817 .cf_cpwait = cpufunc_nullop,
818
819 /* MMU functions */
820
821 .cf_control = cpufunc_control,
822 .cf_domains = cpufunc_domains,
823 .cf_setttb = sa1_setttb,
824 .cf_faultstatus = cpufunc_faultstatus,
825 .cf_faultaddress = cpufunc_faultaddress,
826
827 /* TLB functions */
828
829 .cf_tlb_flushID = armv4_tlb_flushID,
830 .cf_tlb_flushID_SE = sa1_tlb_flushID_SE,
831 .cf_tlb_flushI = armv4_tlb_flushI,
832 .cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
833 .cf_tlb_flushD = armv4_tlb_flushD,
834 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE,
835
836 /* Cache operations */
837
838 .cf_icache_sync_all = sa1_cache_syncI,
839 .cf_icache_sync_range = sa1_cache_syncI_rng,
840
841 .cf_dcache_wbinv_all = sa1_cache_purgeD,
842 .cf_dcache_wbinv_range = sa1_cache_purgeD_rng,
843 /*XXX*/ .cf_dcache_inv_range = sa1_cache_purgeD_rng,
844 .cf_dcache_wb_range = sa1_cache_cleanD_rng,
845
846 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
847 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
848 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
849
850 .cf_idcache_wbinv_all = sa1_cache_purgeID,
851 .cf_idcache_wbinv_range = sa1_cache_purgeID_rng,
852
853 /* Other functions */
854
855 .cf_flush_prefetchbuf = cpufunc_nullop,
856 .cf_drain_writebuf = armv4_drain_writebuf,
857 .cf_flush_brnchtgt_C = cpufunc_nullop,
858 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
859
860 .cf_sleep = (void *)cpufunc_nullop,
861
862 /* Soft functions */
863
864 .cf_dataabt_fixup = cpufunc_null_fixup,
865 .cf_prefetchabt_fixup = cpufunc_null_fixup,
866
867 .cf_context_switch = sa110_context_switch,
868
869 .cf_setup = sa110_setup
870 };
871 #endif /* CPU_SA110 */
872
873 #if defined(CPU_SA1100) || defined(CPU_SA1110)
874 struct cpu_functions sa11x0_cpufuncs = {
875 /* CPU functions */
876
877 .cf_id = cpufunc_id,
878 .cf_cpwait = cpufunc_nullop,
879
880 /* MMU functions */
881
882 .cf_control = cpufunc_control,
883 .cf_domains = cpufunc_domains,
884 .cf_setttb = sa1_setttb,
885 .cf_faultstatus = cpufunc_faultstatus,
886 .cf_faultaddress = cpufunc_faultaddress,
887
888 /* TLB functions */
889
890 .cf_tlb_flushID = armv4_tlb_flushID,
891 .cf_tlb_flushID_SE = sa1_tlb_flushID_SE,
892 .cf_tlb_flushI = armv4_tlb_flushI,
893 .cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
894 .cf_tlb_flushD = armv4_tlb_flushD,
895 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE,
896
897 /* Cache operations */
898
899 .cf_icache_sync_all = sa1_cache_syncI,
900 .cf_icache_sync_range = sa1_cache_syncI_rng,
901
902 .cf_dcache_wbinv_all = sa1_cache_purgeD,
903 .cf_dcache_wbinv_range = sa1_cache_purgeD_rng,
904 /*XXX*/ .cf_dcache_inv_range = sa1_cache_purgeD_rng,
905 .cf_dcache_wb_range = sa1_cache_cleanD_rng,
906
907 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
908 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
909 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
910
911 .cf_idcache_wbinv_all = sa1_cache_purgeID,
912 .cf_idcache_wbinv_range = sa1_cache_purgeID_rng,
913
914 /* Other functions */
915
916 .cf_flush_prefetchbuf = sa11x0_drain_readbuf,
917 .cf_drain_writebuf = armv4_drain_writebuf,
918 .cf_flush_brnchtgt_C = cpufunc_nullop,
919 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
920
921 .cf_sleep = sa11x0_cpu_sleep,
922
923 /* Soft functions */
924
925 .cf_dataabt_fixup = cpufunc_null_fixup,
926 .cf_prefetchabt_fixup = cpufunc_null_fixup,
927
928 .cf_context_switch = sa11x0_context_switch,
929
930 .cf_setup = sa11x0_setup
931 };
932 #endif /* CPU_SA1100 || CPU_SA1110 */
933
934 #if defined(CPU_FA526)
935 struct cpu_functions fa526_cpufuncs = {
936 /* CPU functions */
937
938 .cf_id = cpufunc_id,
939 .cf_cpwait = cpufunc_nullop,
940
941 /* MMU functions */
942
943 .cf_control = cpufunc_control,
944 .cf_domains = cpufunc_domains,
945 .cf_setttb = fa526_setttb,
946 .cf_faultstatus = cpufunc_faultstatus,
947 .cf_faultaddress = cpufunc_faultaddress,
948
949 /* TLB functions */
950
951 .cf_tlb_flushID = armv4_tlb_flushID,
952 .cf_tlb_flushID_SE = fa526_tlb_flushID_SE,
953 .cf_tlb_flushI = armv4_tlb_flushI,
954 .cf_tlb_flushI_SE = fa526_tlb_flushI_SE,
955 .cf_tlb_flushD = armv4_tlb_flushD,
956 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE,
957
958 /* Cache operations */
959
960 .cf_icache_sync_all = fa526_icache_sync_all,
961 .cf_icache_sync_range = fa526_icache_sync_range,
962
963 .cf_dcache_wbinv_all = fa526_dcache_wbinv_all,
964 .cf_dcache_wbinv_range = fa526_dcache_wbinv_range,
965 .cf_dcache_inv_range = fa526_dcache_inv_range,
966 .cf_dcache_wb_range = fa526_dcache_wb_range,
967
968 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
969 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
970 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
971
972 .cf_idcache_wbinv_all = fa526_idcache_wbinv_all,
973 .cf_idcache_wbinv_range = fa526_idcache_wbinv_range,
974
975 /* Other functions */
976
977 .cf_flush_prefetchbuf = fa526_flush_prefetchbuf,
978 .cf_drain_writebuf = armv4_drain_writebuf,
979 .cf_flush_brnchtgt_C = cpufunc_nullop,
980 .cf_flush_brnchtgt_E = fa526_flush_brnchtgt_E,
981
982 .cf_sleep = fa526_cpu_sleep,
983
984 /* Soft functions */
985
986 .cf_dataabt_fixup = cpufunc_null_fixup,
987 .cf_prefetchabt_fixup = cpufunc_null_fixup,
988
989 .cf_context_switch = fa526_context_switch,
990
991 .cf_setup = fa526_setup
992 };
993 #endif /* CPU_FA526 */
994
995 #ifdef CPU_IXP12X0
996 struct cpu_functions ixp12x0_cpufuncs = {
997 /* CPU functions */
998
999 .cf_id = cpufunc_id,
1000 .cf_cpwait = cpufunc_nullop,
1001
1002 /* MMU functions */
1003
1004 .cf_control = cpufunc_control,
1005 .cf_domains = cpufunc_domains,
1006 .cf_setttb = sa1_setttb,
1007 .cf_faultstatus = cpufunc_faultstatus,
1008 .cf_faultaddress = cpufunc_faultaddress,
1009
1010 /* TLB functions */
1011
1012 .cf_tlb_flushID = armv4_tlb_flushID,
1013 .cf_tlb_flushID_SE = sa1_tlb_flushID_SE,
1014 .cf_tlb_flushI = armv4_tlb_flushI,
1015 .cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
1016 .cf_tlb_flushD = armv4_tlb_flushD,
1017 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE,
1018
1019 /* Cache operations */
1020
1021 .cf_icache_sync_all = sa1_cache_syncI,
1022 .cf_icache_sync_range = sa1_cache_syncI_rng,
1023
1024 .cf_dcache_wbinv_all = sa1_cache_purgeD,
1025 .cf_dcache_wbinv_range = sa1_cache_purgeD_rng,
1026 /*XXX*/ .cf_dcache_inv_range = sa1_cache_purgeD_rng,
1027 .cf_dcache_wb_range = sa1_cache_cleanD_rng,
1028
1029 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
1030 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
1031 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
1032
1033 .cf_idcache_wbinv_all = sa1_cache_purgeID,
1034 .cf_idcache_wbinv_range = sa1_cache_purgeID_rng,
1035
1036 /* Other functions */
1037
1038 .cf_flush_prefetchbuf = ixp12x0_drain_readbuf,
1039 .cf_drain_writebuf = armv4_drain_writebuf,
1040 .cf_flush_brnchtgt_C = cpufunc_nullop,
1041 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
1042
1043 .cf_sleep = (void *)cpufunc_nullop,
1044
1045 /* Soft functions */
1046
1047 .cf_dataabt_fixup = cpufunc_null_fixup,
1048 .cf_prefetchabt_fixup = cpufunc_null_fixup,
1049
1050 .cf_context_switch = ixp12x0_context_switch,
1051
1052 .cf_setup = ixp12x0_setup
1053 };
1054 #endif /* CPU_IXP12X0 */
1055
1056 #if defined(CPU_XSCALE)
1057 struct cpu_functions xscale_cpufuncs = {
1058 /* CPU functions */
1059
1060 .cf_id = cpufunc_id,
1061 .cf_cpwait = xscale_cpwait,
1062
1063 /* MMU functions */
1064
1065 .cf_control = xscale_control,
1066 .cf_domains = cpufunc_domains,
1067 .cf_setttb = xscale_setttb,
1068 .cf_faultstatus = cpufunc_faultstatus,
1069 .cf_faultaddress = cpufunc_faultaddress,
1070
1071 /* TLB functions */
1072
1073 .cf_tlb_flushID = armv4_tlb_flushID,
1074 .cf_tlb_flushID_SE = xscale_tlb_flushID_SE,
1075 .cf_tlb_flushI = armv4_tlb_flushI,
1076 .cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
1077 .cf_tlb_flushD = armv4_tlb_flushD,
1078 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE,
1079
1080 /* Cache operations */
1081
1082 .cf_icache_sync_all = xscale_cache_syncI,
1083 .cf_icache_sync_range = xscale_cache_syncI_rng,
1084
1085 .cf_dcache_wbinv_all = xscale_cache_purgeD,
1086 .cf_dcache_wbinv_range = xscale_cache_purgeD_rng,
1087 .cf_dcache_inv_range = xscale_cache_flushD_rng,
1088 .cf_dcache_wb_range = xscale_cache_cleanD_rng,
1089
1090 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
1091 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
1092 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
1093
1094 .cf_idcache_wbinv_all = xscale_cache_purgeID,
1095 .cf_idcache_wbinv_range = xscale_cache_purgeID_rng,
1096
1097 /* Other functions */
1098
1099 .cf_flush_prefetchbuf = cpufunc_nullop,
1100 .cf_drain_writebuf = armv4_drain_writebuf,
1101 .cf_flush_brnchtgt_C = cpufunc_nullop,
1102 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
1103
1104 .cf_sleep = xscale_cpu_sleep,
1105
1106 /* Soft functions */
1107
1108 .cf_dataabt_fixup = cpufunc_null_fixup,
1109 .cf_prefetchabt_fixup = cpufunc_null_fixup,
1110
1111 .cf_context_switch = xscale_context_switch,
1112
1113 .cf_setup = xscale_setup
1114 };
1115 #endif /* CPU_XSCALE */
1116
1117 #if defined(CPU_ARMV7)
1118 struct cpu_functions armv7_cpufuncs = {
1119 /* CPU functions */
1120
1121 .cf_id = cpufunc_id,
1122 .cf_cpwait = cpufunc_nullop,
1123
1124 /* MMU functions */
1125
1126 .cf_control = cpufunc_control,
1127 .cf_domains = cpufunc_domains,
1128 .cf_setttb = armv7_setttb,
1129 .cf_faultstatus = cpufunc_faultstatus,
1130 .cf_faultaddress = cpufunc_faultaddress,
1131
1132 /* TLB functions */
1133
1134 .cf_tlb_flushID = armv7up_tlb_flushID,
1135 .cf_tlb_flushID_SE = armv7up_tlb_flushID_SE,
1136 .cf_tlb_flushI = armv7up_tlb_flushI,
1137 .cf_tlb_flushI_SE = armv7up_tlb_flushI_SE,
1138 .cf_tlb_flushD = armv7up_tlb_flushD,
1139 .cf_tlb_flushD_SE = armv7up_tlb_flushD_SE,
1140
1141 /* Cache operations */
1142
1143 .cf_icache_sync_all = armv7_icache_sync_all,
1144 .cf_dcache_wbinv_all = armv7_dcache_wbinv_all,
1145
1146 .cf_dcache_inv_range = armv7_dcache_inv_range,
1147 .cf_dcache_wb_range = armv7_dcache_wb_range,
1148 .cf_dcache_wbinv_range = armv7_dcache_wbinv_range,
1149
1150 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
1151 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
1152 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
1153
1154 .cf_icache_sync_range = armv7_icache_sync_range,
1155 .cf_idcache_wbinv_range = armv7_idcache_wbinv_range,
1156
1157
1158 .cf_idcache_wbinv_all = armv7_idcache_wbinv_all,
1159
1160 /* Other functions */
1161
1162 .cf_flush_prefetchbuf = cpufunc_nullop,
1163 .cf_drain_writebuf = armv7_drain_writebuf,
1164 .cf_flush_brnchtgt_C = cpufunc_nullop,
1165 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
1166
1167 .cf_sleep = armv7_cpu_sleep,
1168
1169 /* Soft functions */
1170
1171 .cf_dataabt_fixup = cpufunc_null_fixup,
1172 .cf_prefetchabt_fixup = cpufunc_null_fixup,
1173
1174 .cf_context_switch = armv7_context_switch,
1175
1176 .cf_setup = armv7_setup
1177
1178 };
1179 #endif /* CPU_ARMV7 */
1180
1181 #ifdef CPU_PJ4B
1182 struct cpu_functions pj4bv7_cpufuncs = {
1183 /* CPU functions */
1184
1185 .cf_id = cpufunc_id,
1186 .cf_cpwait = armv7_drain_writebuf,
1187
1188 /* MMU functions */
1189
1190 .cf_control = cpufunc_control,
1191 .cf_domains = cpufunc_domains,
1192 .cf_setttb = armv7_setttb,
1193 .cf_faultstatus = cpufunc_faultstatus,
1194 .cf_faultaddress = cpufunc_faultaddress,
1195
1196 /* TLB functions */
1197
1198 .cf_tlb_flushID = armv7up_tlb_flushID,
1199 .cf_tlb_flushID_SE = armv7up_tlb_flushID_SE,
1200 .cf_tlb_flushI = armv7up_tlb_flushID,
1201 .cf_tlb_flushI_SE = armv7up_tlb_flushID_SE,
1202 .cf_tlb_flushD = armv7up_tlb_flushID,
1203 .cf_tlb_flushD_SE = armv7up_tlb_flushID_SE,
1204
1205 /* Cache operations (see also pj4bv7_setup) */
1206 .cf_icache_sync_all = armv7_idcache_wbinv_all,
1207 .cf_icache_sync_range = armv7_icache_sync_range,
1208
1209 .cf_dcache_wbinv_all = armv7_dcache_wbinv_all,
1210 .cf_dcache_wbinv_range = armv7_dcache_wbinv_range,
1211 .cf_dcache_inv_range = armv7_dcache_inv_range,
1212 .cf_dcache_wb_range = armv7_dcache_wb_range,
1213
1214 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
1215 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
1216 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
1217
1218 .cf_idcache_wbinv_all = armv7_idcache_wbinv_all,
1219 .cf_idcache_wbinv_range = armv7_idcache_wbinv_range,
1220
1221 /* Other functions */
1222
1223 .cf_flush_prefetchbuf = cpufunc_nullop,
1224 .cf_drain_writebuf = armv7_drain_writebuf,
1225 .cf_flush_brnchtgt_C = cpufunc_nullop,
1226 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
1227
1228 .cf_sleep = pj4b_cpu_sleep,
1229
1230 /* Soft functions */
1231
1232 .cf_dataabt_fixup = cpufunc_null_fixup,
1233 .cf_prefetchabt_fixup = cpufunc_null_fixup,
1234
1235 .cf_context_switch = armv7_context_switch,
1236
1237 .cf_setup = pj4bv7_setup
1238 };
1239 #endif /* CPU_PJ4B */
1240
1241 #ifdef CPU_SHEEVA
1242 struct cpu_functions sheeva_cpufuncs = {
1243 /* CPU functions */
1244
1245 .cf_id = cpufunc_id,
1246 .cf_cpwait = cpufunc_nullop,
1247
1248 /* MMU functions */
1249
1250 .cf_control = cpufunc_control,
1251 .cf_domains = cpufunc_domains,
1252 .cf_setttb = armv5_ec_setttb,
1253 .cf_faultstatus = cpufunc_faultstatus,
1254 .cf_faultaddress = cpufunc_faultaddress,
1255
1256 /* TLB functions */
1257
1258 .cf_tlb_flushID = armv4_tlb_flushID,
1259 .cf_tlb_flushID_SE = arm10_tlb_flushID_SE,
1260 .cf_tlb_flushI = armv4_tlb_flushI,
1261 .cf_tlb_flushI_SE = arm10_tlb_flushI_SE,
1262 .cf_tlb_flushD = armv4_tlb_flushD,
1263 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE,
1264
1265 /* Cache operations */
1266
1267 .cf_icache_sync_all = armv5_ec_icache_sync_all,
1268 .cf_icache_sync_range = armv5_ec_icache_sync_range,
1269
1270 .cf_dcache_wbinv_all = armv5_ec_dcache_wbinv_all,
1271 .cf_dcache_wbinv_range = sheeva_dcache_wbinv_range,
1272 .cf_dcache_inv_range = sheeva_dcache_inv_range,
1273 .cf_dcache_wb_range = sheeva_dcache_wb_range,
1274
1275 .cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
1276 .cf_sdcache_inv_range = (void *)cpufunc_nullop,
1277 .cf_sdcache_wb_range = (void *)cpufunc_nullop,
1278
1279 .cf_idcache_wbinv_all = armv5_ec_idcache_wbinv_all,
1280 .cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,
1281
1282 /* Other functions */
1283
1284 .cf_flush_prefetchbuf = cpufunc_nullop,
1285 .cf_drain_writebuf = armv4_drain_writebuf,
1286 .cf_flush_brnchtgt_C = cpufunc_nullop,
1287 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop,
1288
1289 .cf_sleep = (void *)sheeva_cpu_sleep,
1290
1291 /* Soft functions */
1292
1293 .cf_dataabt_fixup = cpufunc_null_fixup,
1294 .cf_prefetchabt_fixup = cpufunc_null_fixup,
1295
1296 .cf_context_switch = arm10_context_switch,
1297
1298 .cf_setup = sheeva_setup
1299 };
1300 #endif /* CPU_SHEEVA */
1301
1302
1303 /*
1304 * Global constants also used by locore.s
1305 */
1306
1307 struct cpu_functions cpufuncs;
1308 u_int cputype;
1309
1310 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
1311 defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_FA526) || \
1312 defined(CPU_SHEEVA) || \
1313 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1314 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
1315 defined(CPU_ARMV6) || defined(CPU_ARMV7)
1316 static void get_cachetype_cp15(void);
1317
1318 /* Additional cache information local to this file. Log2 of some of the
1319 above numbers. */
1320 static int arm_dcache_log2_nsets;
1321 static int arm_dcache_log2_assoc;
1322 static int arm_dcache_log2_linesize;
1323
1324 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
1325 static inline u_int
get_cachesize_cp15(int cssr)1326 get_cachesize_cp15(int cssr)
1327 {
1328 #if defined(CPU_ARMV7)
1329 __asm volatile(".arch\tarmv7a");
1330
1331 armreg_csselr_write(cssr);
1332 isb(); /* sync to the new cssr */
1333
1334 #else
1335 __asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr) : "memory");
1336 #endif
1337 return armreg_ccsidr_read();
1338 }
1339 #endif
1340
1341 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
1342 static void
get_cacheinfo_clidr(struct arm_cache_info * info,u_int level,u_int clidr)1343 get_cacheinfo_clidr(struct arm_cache_info *info, u_int level, u_int clidr)
1344 {
1345 u_int csid;
1346
1347 if (clidr & 6) {
1348 csid = get_cachesize_cp15(level << 1); /* select dcache values */
1349 info->dcache_sets = CPU_CSID_NUMSETS(csid) + 1;
1350 info->dcache_ways = CPU_CSID_ASSOC(csid) + 1;
1351 info->dcache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
1352 info->dcache_way_size =
1353 info->dcache_line_size * info->dcache_sets;
1354 info->dcache_size = info->dcache_way_size * info->dcache_ways;
1355
1356 if (level == 0) {
1357 arm_dcache_log2_assoc = CPU_CSID_ASSOC(csid) + 1;
1358 arm_dcache_log2_linesize = CPU_CSID_LEN(csid) + 4;
1359 arm_dcache_log2_nsets =
1360 31 - __builtin_clz(info->dcache_sets*2-1);
1361 }
1362 }
1363
1364 info->cache_unified = (clidr == 4);
1365
1366 if (level > 0) {
1367 info->dcache_type = CACHE_TYPE_PIPT;
1368 info->icache_type = CACHE_TYPE_PIPT;
1369 }
1370
1371 if (info->cache_unified) {
1372 info->icache_ways = info->dcache_ways;
1373 info->icache_line_size = info->dcache_line_size;
1374 info->icache_way_size = info->dcache_way_size;
1375 info->icache_size = info->dcache_size;
1376 } else {
1377 csid = get_cachesize_cp15((level << 1)|CPU_CSSR_InD); /* select icache values */
1378 info->icache_sets = CPU_CSID_NUMSETS(csid) + 1;
1379 info->icache_ways = CPU_CSID_ASSOC(csid) + 1;
1380 info->icache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
1381 info->icache_way_size = info->icache_line_size * info->icache_sets;
1382 info->icache_size = info->icache_way_size * info->icache_ways;
1383 }
1384 if (level == 0
1385 && info->dcache_way_size <= PAGE_SIZE
1386 && info->icache_way_size <= PAGE_SIZE) {
1387 arm_cache_prefer_mask = 0;
1388 }
1389 }
1390 #endif /* (ARM_MMU_V6 + ARM_MMU_V7) > 0 */
1391
1392 static void
get_cachetype_cp15(void)1393 get_cachetype_cp15(void)
1394 {
1395 u_int ctype, isize, dsize;
1396 u_int multiplier;
1397
1398 ctype = armreg_ctr_read();
1399
1400 /*
1401 * ...and thus spake the ARM ARM:
1402 *
1403 * If an <opcode2> value corresponding to an unimplemented or
1404 * reserved ID register is encountered, the System Control
1405 * processor returns the value of the main ID register.
1406 */
1407 if (ctype == cpu_idnum())
1408 goto out;
1409
1410 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
1411 if (CPU_CT_FORMAT(ctype) == 4) {
1412 u_int clidr = armreg_clidr_read();
1413
1414 if (CPU_CT4_L1IPOLICY(ctype) == CPU_CT4_L1_PIPT) {
1415 arm_pcache.icache_type = CACHE_TYPE_PIPT;
1416 } else {
1417 arm_pcache.icache_type = CACHE_TYPE_VIPT;
1418 arm_cache_prefer_mask = PAGE_SIZE;
1419 }
1420 #ifdef CPU_CORTEX
1421 if (CPU_ID_CORTEX_P(cpu_idnum())) {
1422 arm_pcache.dcache_type = CACHE_TYPE_PIPT;
1423 } else
1424 #endif
1425 {
1426 arm_pcache.dcache_type = CACHE_TYPE_VIPT;
1427 }
1428 arm_pcache.cache_type = CPU_CT_CTYPE_WB14;
1429
1430 get_cacheinfo_clidr(&arm_pcache, 0, clidr & 7);
1431 arm_dcache_align = arm_pcache.dcache_line_size;
1432 clidr >>= 3;
1433 if (clidr & 7) {
1434 get_cacheinfo_clidr(&arm_scache, 1, clidr & 7);
1435 if (arm_scache.dcache_line_size < arm_dcache_align)
1436 arm_dcache_align = arm_scache.dcache_line_size;
1437 }
1438 /*
1439 * The pmap cleans an entire way for an exec page so
1440 * we don't care that it's VIPT anymore.
1441 */
1442 if (arm_pcache.dcache_type == CACHE_TYPE_PIPT) {
1443 arm_cache_prefer_mask = 0;
1444 }
1445 goto out;
1446 }
1447 #endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */
1448
1449 if ((ctype & CPU_CT_S) == 0)
1450 arm_pcache.cache_unified = 1;
1451
1452 /*
1453 * If you want to know how this code works, go read the ARM ARM.
1454 */
1455
1456 arm_pcache.cache_type = CPU_CT_CTYPE(ctype);
1457
1458 if (arm_pcache.cache_unified == 0) {
1459 isize = CPU_CT_ISIZE(ctype);
1460 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
1461 arm_pcache.icache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
1462 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
1463 if (isize & CPU_CT_xSIZE_M)
1464 arm_pcache.icache_line_size = 0; /* not present */
1465 else
1466 arm_pcache.icache_ways = 1;
1467 } else {
1468 arm_pcache.icache_ways = multiplier <<
1469 (CPU_CT_xSIZE_ASSOC(isize) - 1);
1470 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
1471 arm_pcache.icache_type = CACHE_TYPE_VIPT;
1472 if (CPU_CT_xSIZE_P & isize)
1473 arm_cache_prefer_mask |=
1474 __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
1475 - CPU_CT_xSIZE_ASSOC(isize))
1476 - PAGE_SIZE;
1477 #endif
1478 }
1479 arm_pcache.icache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
1480 arm_pcache.icache_way_size =
1481 __BIT(9 + CPU_CT_xSIZE_SIZE(isize) - CPU_CT_xSIZE_ASSOC(isize));
1482 }
1483
1484 dsize = CPU_CT_DSIZE(ctype);
1485 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
1486 arm_pcache.dcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
1487 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
1488 if (dsize & CPU_CT_xSIZE_M)
1489 arm_pcache.dcache_line_size = 0; /* not present */
1490 else
1491 arm_pcache.dcache_ways = 1;
1492 } else {
1493 arm_pcache.dcache_ways = multiplier <<
1494 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
1495 #if (ARM_MMU_V6) > 0
1496 arm_pcache.dcache_type = CACHE_TYPE_VIPT;
1497 if ((CPU_CT_xSIZE_P & dsize)
1498 && CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)) {
1499 arm_cache_prefer_mask |=
1500 __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
1501 - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
1502 }
1503 #endif
1504 }
1505 arm_pcache.dcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
1506 arm_pcache.dcache_way_size =
1507 __BIT(9 + CPU_CT_xSIZE_SIZE(dsize) - CPU_CT_xSIZE_ASSOC(dsize));
1508
1509 arm_dcache_align = arm_pcache.dcache_line_size;
1510
1511 arm_dcache_log2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
1512 arm_dcache_log2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
1513 arm_dcache_log2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
1514 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
1515
1516 out:
1517 KASSERTMSG(arm_dcache_align <= CACHE_LINE_SIZE,
1518 "arm_dcache_align=%u CACHE_LINE_SIZE=%u",
1519 arm_dcache_align, CACHE_LINE_SIZE);
1520 arm_dcache_align_mask = arm_dcache_align - 1;
1521 }
1522 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
1523
1524 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
1525 defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
1526 /* Cache information for CPUs without cache type registers. */
1527 struct cachetab {
1528 uint32_t ct_cpuid;
1529 int ct_pcache_type;
1530 int ct_pcache_unified;
1531 int ct_pdcache_size;
1532 int ct_pdcache_line_size;
1533 int ct_pdcache_ways;
1534 int ct_picache_size;
1535 int ct_picache_line_size;
1536 int ct_picache_ways;
1537 };
1538
1539 struct cachetab cachetab[] = {
1540 /* cpuid, cache type, u, dsiz, ls, wy, isiz, ls, wy */
1541 { CPU_ID_ARM2, 0, 1, 0, 0, 0, 0, 0, 0 },
1542 { CPU_ID_ARM250, 0, 1, 0, 0, 0, 0, 0, 0 },
1543 { CPU_ID_ARM3, CPU_CT_CTYPE_WT, 1, 4096, 16, 64, 0, 0, 0 },
1544 { CPU_ID_ARM610, CPU_CT_CTYPE_WT, 1, 4096, 16, 64, 0, 0, 0 },
1545 { CPU_ID_ARM710, CPU_CT_CTYPE_WT, 1, 8192, 32, 4, 0, 0, 0 },
1546 { CPU_ID_ARM7500, CPU_CT_CTYPE_WT, 1, 4096, 16, 4, 0, 0, 0 },
1547 { CPU_ID_ARM710A, CPU_CT_CTYPE_WT, 1, 8192, 16, 4, 0, 0, 0 },
1548 { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT, 1, 4096, 16, 4, 0, 0, 0 },
1549 /* XXX is this type right for SA-1? */
1550 { CPU_ID_SA110, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
1551 { CPU_ID_SA1100, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
1552 { CPU_ID_SA1110, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
1553 { CPU_ID_IXP1200, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
1554 { 0, 0, 0, 0, 0, 0, 0, 0}
1555 };
1556
1557 static void get_cachetype_table(void);
1558
1559 static void
get_cachetype_table(void)1560 get_cachetype_table(void)
1561 {
1562 int i;
1563 uint32_t cpuid = cpu_idnum();
1564
1565 for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
1566 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
1567 arm_pcache.cache_type = cachetab[i].ct_pcache_type;
1568 arm_pcache.cache_unified = cachetab[i].ct_pcache_unified;
1569 arm_pcache.dcache_size = cachetab[i].ct_pdcache_size;
1570 arm_pcache.dcache_line_size =
1571 cachetab[i].ct_pdcache_line_size;
1572 arm_pcache.dcache_ways = cachetab[i].ct_pdcache_ways;
1573 if (arm_pcache.dcache_ways) {
1574 arm_pcache.dcache_way_size =
1575 arm_pcache.dcache_line_size
1576 / arm_pcache.dcache_ways;
1577 }
1578 arm_pcache.icache_size = cachetab[i].ct_picache_size;
1579 arm_pcache.icache_line_size =
1580 cachetab[i].ct_picache_line_size;
1581 arm_pcache.icache_ways = cachetab[i].ct_picache_ways;
1582 if (arm_pcache.icache_ways) {
1583 arm_pcache.icache_way_size =
1584 arm_pcache.icache_line_size
1585 / arm_pcache.icache_ways;
1586 }
1587 }
1588 }
1589
1590 arm_dcache_align = arm_pcache.dcache_line_size;
1591 arm_dcache_align_mask = arm_dcache_align - 1;
1592 }
1593
1594 #endif /* ARM6 || ARM7 || SA110 || SA1100 || SA1111 || IXP12X0 */
1595
1596
1597 #if defined(CPU_CORTEX) || defined(CPU_PJ4B)
1598 static inline void
set_cpufuncs_mpfixup(void)1599 set_cpufuncs_mpfixup(void)
1600 {
1601 #ifdef MULTIPROCESSOR
1602 /* If MP extensions are present, patch in MP TLB ops */
1603 const uint32_t mpidr = armreg_mpidr_read();
1604 if ((mpidr & (MPIDR_MP|MPIDR_U)) == MPIDR_MP) {
1605 cpufuncs.cf_tlb_flushID = armv7mp_tlb_flushID;
1606 cpufuncs.cf_tlb_flushID_SE = armv7mp_tlb_flushID_SE;
1607 cpufuncs.cf_tlb_flushI = armv7mp_tlb_flushI;
1608 cpufuncs.cf_tlb_flushI_SE = armv7mp_tlb_flushI_SE;
1609 cpufuncs.cf_tlb_flushD = armv7mp_tlb_flushD;
1610 cpufuncs.cf_tlb_flushD_SE = armv7mp_tlb_flushD_SE;
1611 }
1612 #endif
1613 }
1614 #endif
1615
1616 /*
1617 * Cannot panic here as we may not have a console yet ...
1618 */
1619
1620 int
set_cpufuncs(void)1621 set_cpufuncs(void)
1622 {
1623 if (cputype == 0) {
1624 cputype = cpufunc_id();
1625 cputype &= CPU_ID_CPU_MASK;
1626 }
1627
1628 /*
1629 * NOTE: cpu_do_powersave defaults to off. If we encounter a
1630 * CPU type where we want to use it by default, then we set it.
1631 */
1632 #ifdef CPU_ARM6
1633 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1634 (cputype & 0x00000f00) == 0x00000600) {
1635 cpufuncs = arm6_cpufuncs;
1636 get_cachetype_table();
1637 pmap_pte_init_generic();
1638 return 0;
1639 }
1640 #endif /* CPU_ARM6 */
1641 #ifdef CPU_ARM7
1642 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1643 CPU_ID_IS7(cputype) &&
1644 (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
1645 cpufuncs = arm7_cpufuncs;
1646 get_cachetype_table();
1647 pmap_pte_init_generic();
1648 return 0;
1649 }
1650 #endif /* CPU_ARM7 */
1651 #ifdef CPU_ARM7TDMI
1652 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1653 CPU_ID_IS7(cputype) &&
1654 (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
1655 cpufuncs = arm7tdmi_cpufuncs;
1656 get_cachetype_cp15();
1657 pmap_pte_init_generic();
1658 return 0;
1659 }
1660 #endif
1661 #ifdef CPU_ARM8
1662 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1663 (cputype & 0x0000f000) == 0x00008000) {
1664 cpufuncs = arm8_cpufuncs;
1665 get_cachetype_cp15();
1666 pmap_pte_init_arm8();
1667 return 0;
1668 }
1669 #endif /* CPU_ARM8 */
1670 #ifdef CPU_ARM9
1671 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
1672 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
1673 (cputype & 0x0000f000) == 0x00009000) {
1674 cpufuncs = arm9_cpufuncs;
1675 get_cachetype_cp15();
1676 arm9_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1677 arm9_dcache_sets_max =
1678 (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
1679 arm9_dcache_sets_inc;
1680 arm9_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1681 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
1682 #ifdef ARM9_CACHE_WRITE_THROUGH
1683 pmap_pte_init_arm9();
1684 #else
1685 pmap_pte_init_generic();
1686 #endif
1687 return 0;
1688 }
1689 #endif /* CPU_ARM9 */
1690 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
1691 if (cputype == CPU_ID_ARM926EJS ||
1692 cputype == CPU_ID_ARM1026EJS) {
1693 cpufuncs = armv5_ec_cpufuncs;
1694 get_cachetype_cp15();
1695 pmap_pte_init_generic();
1696 return 0;
1697 }
1698 #endif /* CPU_ARM9E || CPU_ARM10 */
1699 #if defined(CPU_SHEEVA)
1700 if (cputype == CPU_ID_MV88SV131 ||
1701 cputype == CPU_ID_MV88FR571_VD) {
1702 cpufuncs = sheeva_cpufuncs;
1703 get_cachetype_cp15();
1704 pmap_pte_init_generic();
1705 cpu_do_powersave = 1; /* Enable powersave */
1706 return 0;
1707 }
1708 #endif /* CPU_SHEEVA */
1709 #ifdef CPU_ARM10
1710 if (/* cputype == CPU_ID_ARM1020T || */
1711 cputype == CPU_ID_ARM1020E) {
1712 /*
1713 * Select write-through cacheing (this isn't really an
1714 * option on ARM1020T).
1715 */
1716 cpufuncs = arm10_cpufuncs;
1717 get_cachetype_cp15();
1718 armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1719 armv5_dcache_sets_max =
1720 (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
1721 armv5_dcache_sets_inc;
1722 armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1723 armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1724 pmap_pte_init_generic();
1725 return 0;
1726 }
1727 #endif /* CPU_ARM10 */
1728
1729
1730 #if defined(CPU_ARM11MPCORE)
1731 if (cputype == CPU_ID_ARM11MPCORE) {
1732 cpufuncs = arm11mpcore_cpufuncs;
1733 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
1734 cpu_armv6_p = true;
1735 #endif
1736 get_cachetype_cp15();
1737 armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
1738 armv5_dcache_sets_max = (1U << (arm_dcache_log2_linesize +
1739 arm_dcache_log2_nsets)) - armv5_dcache_sets_inc;
1740 armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
1741 armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
1742 cpu_do_powersave = 1; /* Enable powersave */
1743 pmap_pte_init_arm11mpcore();
1744 if (arm_cache_prefer_mask)
1745 uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1746
1747 return 0;
1748
1749 }
1750 #endif /* CPU_ARM11MPCORE */
1751
1752 #if defined(CPU_ARM11)
1753 if (cputype == CPU_ID_ARM1136JS ||
1754 cputype == CPU_ID_ARM1136JSR1 ||
1755 cputype == CPU_ID_ARM1176JZS) {
1756 cpufuncs = arm11_cpufuncs;
1757 #if defined(CPU_ARM1136)
1758 if (cputype == CPU_ID_ARM1136JS ||
1759 cputype == CPU_ID_ARM1136JSR1) {
1760 cpufuncs = arm1136_cpufuncs;
1761 if (cputype == CPU_ID_ARM1136JS)
1762 cpufuncs.cf_sleep = arm1136_sleep_rev0;
1763 }
1764 #endif
1765 #if defined(CPU_ARM1176)
1766 if (cputype == CPU_ID_ARM1176JZS) {
1767 cpufuncs = arm1176_cpufuncs;
1768 }
1769 #endif
1770 #if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
1771 cpu_armv6_p = true;
1772 #endif
1773 cpu_do_powersave = 1; /* Enable powersave */
1774 get_cachetype_cp15();
1775 #ifdef ARM11_CACHE_WRITE_THROUGH
1776 pmap_pte_init_arm11();
1777 #else
1778 pmap_pte_init_armv6();
1779 #endif
1780 if (arm_cache_prefer_mask)
1781 uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1782
1783 /*
1784 * Start and reset the PMC Cycle Counter.
1785 */
1786 armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
1787 return 0;
1788 }
1789 #endif /* CPU_ARM11 */
1790 #ifdef CPU_SA110
1791 if (cputype == CPU_ID_SA110) {
1792 cpufuncs = sa110_cpufuncs;
1793 get_cachetype_table();
1794 pmap_pte_init_sa1();
1795 return 0;
1796 }
1797 #endif /* CPU_SA110 */
1798 #ifdef CPU_SA1100
1799 if (cputype == CPU_ID_SA1100) {
1800 cpufuncs = sa11x0_cpufuncs;
1801 get_cachetype_table();
1802 pmap_pte_init_sa1();
1803
1804 /* Use powersave on this CPU. */
1805 cpu_do_powersave = 1;
1806
1807 return 0;
1808 }
1809 #endif /* CPU_SA1100 */
1810 #ifdef CPU_SA1110
1811 if (cputype == CPU_ID_SA1110) {
1812 cpufuncs = sa11x0_cpufuncs;
1813 get_cachetype_table();
1814 pmap_pte_init_sa1();
1815
1816 /* Use powersave on this CPU. */
1817 cpu_do_powersave = 1;
1818
1819 return 0;
1820 }
1821 #endif /* CPU_SA1110 */
1822 #ifdef CPU_FA526
1823 if (cputype == CPU_ID_FA526) {
1824 cpufuncs = fa526_cpufuncs;
1825 get_cachetype_cp15();
1826 pmap_pte_init_generic();
1827
1828 /* Use powersave on this CPU. */
1829 cpu_do_powersave = 1;
1830
1831 return 0;
1832 }
1833 #endif /* CPU_FA526 */
1834 #ifdef CPU_IXP12X0
1835 if (cputype == CPU_ID_IXP1200) {
1836 cpufuncs = ixp12x0_cpufuncs;
1837 get_cachetype_table();
1838 pmap_pte_init_sa1();
1839 return 0;
1840 }
1841 #endif /* CPU_IXP12X0 */
1842 #ifdef CPU_XSCALE_80200
1843 if (cputype == CPU_ID_80200) {
1844 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1845
1846 i80200_icu_init();
1847
1848 /*
1849 * Reset the Performance Monitoring Unit to a
1850 * pristine state:
1851 * - CCNT, PMN0, PMN1 reset to 0
1852 * - overflow indications cleared
1853 * - all counters disabled
1854 */
1855 __asm volatile("mcr p14, 0, %0, c0, c0, 0"
1856 :
1857 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1858 PMNC_CC_IF));
1859
1860 #if defined(XSCALE_CCLKCFG)
1861 /*
1862 * Crank CCLKCFG to maximum legal value.
1863 */
1864 __asm volatile ("mcr p14, 0, %0, c6, c0, 0"
1865 :
1866 : "r" (XSCALE_CCLKCFG));
1867 #endif
1868
1869 /*
1870 * XXX Disable ECC in the Bus Controller Unit; we
1871 * don't really support it, yet. Clear any pending
1872 * error indications.
1873 */
1874 __asm volatile("mcr p13, 0, %0, c0, c1, 0"
1875 :
1876 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1877
1878 cpufuncs = xscale_cpufuncs;
1879
1880 /*
1881 * i80200 errata: Step-A0 and A1 have a bug where
1882 * D$ dirty bits are not cleared on "invalidate by
1883 * address".
1884 *
1885 * Workaround: Clean cache line before invalidating.
1886 */
1887 if (rev == 0 || rev == 1)
1888 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1889
1890 get_cachetype_cp15();
1891 pmap_pte_init_xscale();
1892 return 0;
1893 }
1894 #endif /* CPU_XSCALE_80200 */
1895 #ifdef CPU_XSCALE_80321
1896 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1897 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1898 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1899 i80321_icu_init();
1900
1901 /*
1902 * Reset the Performance Monitoring Unit to a
1903 * pristine state:
1904 * - CCNT, PMN0, PMN1 reset to 0
1905 * - overflow indications cleared
1906 * - all counters disabled
1907 */
1908 __asm volatile("mcr p14, 0, %0, c0, c0, 0"
1909 :
1910 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1911 PMNC_CC_IF));
1912
1913 cpufuncs = xscale_cpufuncs;
1914
1915 get_cachetype_cp15();
1916 pmap_pte_init_xscale();
1917 return 0;
1918 }
1919 #endif /* CPU_XSCALE_80321 */
1920 #ifdef __CPU_XSCALE_PXA2XX
1921 /* ignore core revision to test PXA2xx CPUs */
1922 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1923 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1924 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1925
1926 cpufuncs = xscale_cpufuncs;
1927
1928 get_cachetype_cp15();
1929 pmap_pte_init_xscale();
1930
1931 /* Use powersave on this CPU. */
1932 cpu_do_powersave = 1;
1933
1934 return 0;
1935 }
1936 #endif /* __CPU_XSCALE_PXA2XX */
1937 #ifdef CPU_XSCALE_IXP425
1938 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1939 cputype == CPU_ID_IXP425_266) {
1940 ixp425_icu_init();
1941
1942 cpufuncs = xscale_cpufuncs;
1943
1944 get_cachetype_cp15();
1945 pmap_pte_init_xscale();
1946
1947 return 0;
1948 }
1949 #endif /* CPU_XSCALE_IXP425 */
1950 #if defined(CPU_CORTEX)
1951 if (CPU_ID_CORTEX_P(cputype)) {
1952 cpufuncs = armv7_cpufuncs;
1953 set_cpufuncs_mpfixup();
1954 cpu_do_powersave = 1; /* Enable powersave */
1955 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
1956 cpu_armv7_p = true;
1957 #endif
1958 get_cachetype_cp15();
1959 pmap_pte_init_armv7();
1960 if (arm_cache_prefer_mask)
1961 uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
1962
1963 /*
1964 * Start and reset the PMC Cycle Counter.
1965 */
1966 armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
1967 armreg_pmintenclr_write(PMINTEN_C | PMINTEN_P);
1968 armreg_pmcntenset_write(CORTEX_CNTENS_C);
1969 return 0;
1970 }
1971 #endif /* CPU_CORTEX */
1972
1973 #if defined(CPU_PJ4B)
1974 if ((cputype == CPU_ID_MV88SV581X_V6 ||
1975 cputype == CPU_ID_MV88SV581X_V7 ||
1976 cputype == CPU_ID_MV88SV584X_V7 ||
1977 cputype == CPU_ID_ARM_88SV581X_V6 ||
1978 cputype == CPU_ID_ARM_88SV581X_V7) &&
1979 (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) {
1980 cpufuncs = pj4bv7_cpufuncs;
1981 set_cpufuncs_mpfixup();
1982 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
1983 cpu_armv7_p = true;
1984 #endif
1985 get_cachetype_cp15();
1986 pmap_pte_init_armv7();
1987 return 0;
1988 }
1989 #endif /* CPU_PJ4B */
1990
1991 /*
1992 * Bzzzz. And the answer was ...
1993 */
1994 panic("No support for this CPU type (%08x) in kernel", cputype);
1995 return ARCHITECTURE_NOT_PRESENT;
1996 }
1997
1998 /*
1999 * Fixup routines for data and prefetch aborts.
2000 *
2001 * Several compile time symbols are used
2002 *
2003 * DEBUG_FAULT_CORRECTION - Print debugging information during the
2004 * correction of registers after a fault.
2005 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
2006 * when defined should use late aborts
2007 */
2008
2009
2010 /*
2011 * Null abort fixup routine.
2012 * For use when no fixup is required.
2013 */
2014 int
cpufunc_null_fixup(void * arg)2015 cpufunc_null_fixup(void *arg)
2016 {
2017 return(ABORT_FIXUP_OK);
2018 }
2019
2020
2021 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)
2022
2023 #ifdef DEBUG_FAULT_CORRECTION
2024 #define DFC_PRINTF(x) printf x
2025 #define DFC_DISASSEMBLE(x) disassemble(x)
2026 #else
2027 #define DFC_PRINTF(x) /* nothing */
2028 #define DFC_DISASSEMBLE(x) /* nothing */
2029 #endif
2030
2031 /*
2032 * "Early" data abort fixup.
2033 *
2034 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used
2035 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
2036 *
2037 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
2038 */
2039 int
early_abort_fixup(void * arg)2040 early_abort_fixup(void *arg)
2041 {
2042 trapframe_t *frame = arg;
2043 u_int fault_pc;
2044 u_int fault_instruction;
2045 int saved_lr = 0;
2046
2047 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2048
2049 /* Ok an abort in SVC mode */
2050
2051 /*
2052 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2053 * as the fault happened in svc mode but we need it in the
2054 * usr slot so we can treat the registers as an array of ints
2055 * during fixing.
2056 * NOTE: This PC is in the position but writeback is not
2057 * allowed on r15.
2058 * Doing it like this is more efficient than trapping this
2059 * case in all possible locations in the following fixup code.
2060 */
2061
2062 saved_lr = frame->tf_usr_lr;
2063 frame->tf_usr_lr = frame->tf_svc_lr;
2064
2065 /*
2066 * Note the trapframe does not have the SVC r13 so a fault
2067 * from an instruction with writeback to r13 in SVC mode is
2068 * not allowed. This should not happen as the kstack is
2069 * always valid.
2070 */
2071 }
2072
2073 /* Get fault address and status from the CPU */
2074
2075 fault_pc = frame->tf_pc;
2076 fault_instruction = *((volatile unsigned int *)fault_pc);
2077
2078 /* Decode the fault instruction and fix the registers as needed */
2079
2080 if ((fault_instruction & 0x0e000000) == 0x08000000) {
2081 int base;
2082 int loop;
2083 int count;
2084 int *registers = &frame->tf_r0;
2085
2086 DFC_PRINTF(("LDM/STM\n"));
2087 DFC_DISASSEMBLE(fault_pc);
2088 if (fault_instruction & (1 << 21)) {
2089 DFC_PRINTF(("This instruction must be corrected\n"));
2090 base = (fault_instruction >> 16) & 0x0f;
2091 if (base == 15)
2092 return ABORT_FIXUP_FAILED;
2093 /* Count registers transferred */
2094 count = 0;
2095 for (loop = 0; loop < 16; ++loop) {
2096 if (fault_instruction & (1<<loop))
2097 ++count;
2098 }
2099 DFC_PRINTF(("%d registers used\n", count));
2100 DFC_PRINTF(("Corrected r%d by %d bytes ",
2101 base, count * 4));
2102 if (fault_instruction & (1 << 23)) {
2103 DFC_PRINTF(("down\n"));
2104 registers[base] -= count * 4;
2105 } else {
2106 DFC_PRINTF(("up\n"));
2107 registers[base] += count * 4;
2108 }
2109 }
2110 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
2111 int base;
2112 int offset;
2113 int *registers = &frame->tf_r0;
2114
2115 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
2116
2117 DFC_DISASSEMBLE(fault_pc);
2118
2119 /* Only need to fix registers if write back is turned on */
2120
2121 if ((fault_instruction & (1 << 21)) != 0) {
2122 base = (fault_instruction >> 16) & 0x0f;
2123 if (base == 13 &&
2124 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2125 return ABORT_FIXUP_FAILED;
2126 if (base == 15)
2127 return ABORT_FIXUP_FAILED;
2128
2129 offset = (fault_instruction & 0xff) << 2;
2130 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2131 if ((fault_instruction & (1 << 23)) != 0)
2132 offset = -offset;
2133 registers[base] += offset;
2134 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2135 }
2136 }
2137
2138 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2139
2140 /* Ok an abort in SVC mode */
2141
2142 /*
2143 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2144 * as the fault happened in svc mode but we need it in the
2145 * usr slot so we can treat the registers as an array of ints
2146 * during fixing.
2147 * NOTE: This PC is in the position but writeback is not
2148 * allowed on r15.
2149 * Doing it like this is more efficient than trapping this
2150 * case in all possible locations in the prior fixup code.
2151 */
2152
2153 frame->tf_svc_lr = frame->tf_usr_lr;
2154 frame->tf_usr_lr = saved_lr;
2155
2156 /*
2157 * Note the trapframe does not have the SVC r13 so a fault
2158 * from an instruction with writeback to r13 in SVC mode is
2159 * not allowed. This should not happen as the kstack is
2160 * always valid.
2161 */
2162 }
2163
2164 return(ABORT_FIXUP_OK);
2165 }
2166 #endif /* CPU_ARM6/7 */
2167
2168
2169 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
2170 defined(CPU_ARM7TDMI)
2171 /*
2172 * "Late" (base updated) data abort fixup
2173 *
2174 * For ARM6 (in late-abort mode) and ARM7.
2175 *
2176 * In this model, all data-transfer instructions need fixing up. We defer
2177 * LDM, STM, LDC and STC fixup to the early-abort handler.
2178 */
2179 int
late_abort_fixup(void * arg)2180 late_abort_fixup(void *arg)
2181 {
2182 trapframe_t *frame = arg;
2183 u_int fault_pc;
2184 u_int fault_instruction;
2185 int saved_lr = 0;
2186
2187 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2188
2189 /* Ok an abort in SVC mode */
2190
2191 /*
2192 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2193 * as the fault happened in svc mode but we need it in the
2194 * usr slot so we can treat the registers as an array of ints
2195 * during fixing.
2196 * NOTE: This PC is in the position but writeback is not
2197 * allowed on r15.
2198 * Doing it like this is more efficient than trapping this
2199 * case in all possible locations in the following fixup code.
2200 */
2201
2202 saved_lr = frame->tf_usr_lr;
2203 frame->tf_usr_lr = frame->tf_svc_lr;
2204
2205 /*
2206 * Note the trapframe does not have the SVC r13 so a fault
2207 * from an instruction with writeback to r13 in SVC mode is
2208 * not allowed. This should not happen as the kstack is
2209 * always valid.
2210 */
2211 }
2212
2213 /* Get fault address and status from the CPU */
2214
2215 fault_pc = frame->tf_pc;
2216 fault_instruction = *((volatile unsigned int *)fault_pc);
2217
2218 /* Decode the fault instruction and fix the registers as needed */
2219
2220 /* Was is a swap instruction ? */
2221
2222 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
2223 DFC_DISASSEMBLE(fault_pc);
2224 } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
2225
2226 /* Was is a ldr/str instruction */
2227 /* This is for late abort only */
2228
2229 int base;
2230 int offset;
2231 int *registers = &frame->tf_r0;
2232
2233 DFC_DISASSEMBLE(fault_pc);
2234
2235 /* This is for late abort only */
2236
2237 if ((fault_instruction & (1 << 24)) == 0
2238 || (fault_instruction & (1 << 21)) != 0) {
2239 /* postindexed ldr/str with no writeback */
2240
2241 base = (fault_instruction >> 16) & 0x0f;
2242 if (base == 13 &&
2243 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2244 return ABORT_FIXUP_FAILED;
2245 if (base == 15)
2246 return ABORT_FIXUP_FAILED;
2247 DFC_PRINTF(("late abt fix: r%d=%08x : ",
2248 base, registers[base]));
2249 if ((fault_instruction & (1 << 25)) == 0) {
2250 /* Immediate offset - easy */
2251
2252 offset = fault_instruction & 0xfff;
2253 if ((fault_instruction & (1 << 23)))
2254 offset = -offset;
2255 registers[base] += offset;
2256 DFC_PRINTF(("imm=%08x ", offset));
2257 } else {
2258 /* offset is a shifted register */
2259 int shift;
2260
2261 offset = fault_instruction & 0x0f;
2262 if (offset == base)
2263 return ABORT_FIXUP_FAILED;
2264
2265 /*
2266 * Register offset - hard we have to
2267 * cope with shifts !
2268 */
2269 offset = registers[offset];
2270
2271 if ((fault_instruction & (1 << 4)) == 0)
2272 /* shift with amount */
2273 shift = (fault_instruction >> 7) & 0x1f;
2274 else {
2275 /* shift with register */
2276 if ((fault_instruction & (1 << 7)) != 0)
2277 /* undefined for now so bail out */
2278 return ABORT_FIXUP_FAILED;
2279 shift = ((fault_instruction >> 8) & 0xf);
2280 if (base == shift)
2281 return ABORT_FIXUP_FAILED;
2282 DFC_PRINTF(("shift reg=%d ", shift));
2283 shift = registers[shift];
2284 }
2285 DFC_PRINTF(("shift=%08x ", shift));
2286 switch (((fault_instruction >> 5) & 0x3)) {
2287 case 0 : /* Logical left */
2288 offset = (int)(((u_int)offset) << shift);
2289 break;
2290 case 1 : /* Logical Right */
2291 if (shift == 0) shift = 32;
2292 offset = (int)(((u_int)offset) >> shift);
2293 break;
2294 case 2 : /* Arithmetic Right */
2295 if (shift == 0) shift = 32;
2296 offset = (int)(((int)offset) >> shift);
2297 break;
2298 case 3 : /* Rotate right (rol or rxx) */
2299 return ABORT_FIXUP_FAILED;
2300 break;
2301 }
2302
2303 DFC_PRINTF(("abt: fixed LDR/STR with "
2304 "register offset\n"));
2305 if ((fault_instruction & (1 << 23)))
2306 offset = -offset;
2307 DFC_PRINTF(("offset=%08x ", offset));
2308 registers[base] += offset;
2309 }
2310 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
2311 }
2312 }
2313
2314 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2315
2316 /* Ok an abort in SVC mode */
2317
2318 /*
2319 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2320 * as the fault happened in svc mode but we need it in the
2321 * usr slot so we can treat the registers as an array of ints
2322 * during fixing.
2323 * NOTE: This PC is in the position but writeback is not
2324 * allowed on r15.
2325 * Doing it like this is more efficient than trapping this
2326 * case in all possible locations in the prior fixup code.
2327 */
2328
2329 frame->tf_svc_lr = frame->tf_usr_lr;
2330 frame->tf_usr_lr = saved_lr;
2331
2332 /*
2333 * Note the trapframe does not have the SVC r13 so a fault
2334 * from an instruction with writeback to r13 in SVC mode is
2335 * not allowed. This should not happen as the kstack is
2336 * always valid.
2337 */
2338 }
2339
2340 /*
2341 * Now let the early-abort fixup routine have a go, in case it
2342 * was an LDM, STM, LDC or STC that faulted.
2343 */
2344
2345 return early_abort_fixup(arg);
2346 }
2347 #endif /* CPU_ARM6(LATE)/7/7TDMI */
2348
2349 /*
2350 * CPU Setup code
2351 */
2352
2353 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
2354 defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
2355 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2356 defined(CPU_FA526) || \
2357 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2358 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
2359 defined(CPU_ARM10) || defined(CPU_SHEEVA) || \
2360 defined(CPU_ARMV6) || defined(CPU_ARMV7)
2361
2362 #define IGN 0
2363 #define OR 1
2364 #define BIC 2
2365
2366 struct cpu_option {
2367 const char *co_name;
2368 int co_falseop;
2369 int co_trueop;
2370 int co_value;
2371 };
2372
2373 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2374
2375 static u_int __noasan
parse_cpu_options(char * args,struct cpu_option * optlist,u_int cpuctrl)2376 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
2377 {
2378 int integer;
2379
2380 if (args == NULL)
2381 return(cpuctrl);
2382
2383 while (optlist->co_name) {
2384 if (get_bootconf_option(args, optlist->co_name,
2385 BOOTOPT_TYPE_BOOLEAN, &integer)) {
2386 if (integer) {
2387 if (optlist->co_trueop == OR)
2388 cpuctrl |= optlist->co_value;
2389 else if (optlist->co_trueop == BIC)
2390 cpuctrl &= ~optlist->co_value;
2391 } else {
2392 if (optlist->co_falseop == OR)
2393 cpuctrl |= optlist->co_value;
2394 else if (optlist->co_falseop == BIC)
2395 cpuctrl &= ~optlist->co_value;
2396 }
2397 }
2398 ++optlist;
2399 }
2400 return(cpuctrl);
2401 }
2402 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */
2403
2404 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
2405 || defined(CPU_ARM8)
2406 struct cpu_option arm678_options[] = {
2407 #ifdef COMPAT_12
2408 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2409 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2410 #endif /* COMPAT_12 */
2411 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
2412 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
2413 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2414 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2415 { NULL, IGN, IGN, 0 }
2416 };
2417
2418 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2419
2420 #ifdef CPU_ARM6
2421 struct cpu_option arm6_options[] = {
2422 { "arm6.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
2423 { "arm6.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
2424 { "arm6.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2425 { "arm6.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2426 { NULL, IGN, IGN, 0 }
2427 };
2428
2429 void
arm6_setup(char * args)2430 arm6_setup(char *args)
2431 {
2432
2433 /* Set up default control registers bits */
2434 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2435 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2436 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2437 #if 0
2438 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2439 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2440 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2441 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2442 | CPU_CONTROL_AFLT_ENABLE;
2443 #endif
2444
2445 #ifdef ARM6_LATE_ABORT
2446 cpuctrl |= CPU_CONTROL_LABT_ENABLE;
2447 #endif /* ARM6_LATE_ABORT */
2448
2449 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2450 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2451 #endif
2452
2453 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2454 cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);
2455
2456 #ifdef __ARMEB__
2457 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2458 #endif
2459
2460 /* Clear out the cache */
2461 cpu_idcache_wbinv_all();
2462
2463 /* Set the control register */
2464 cpu_control(0xffffffff, cpuctrl);
2465 }
2466 #endif /* CPU_ARM6 */
2467
2468 #ifdef CPU_ARM7
2469 struct cpu_option arm7_options[] = {
2470 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
2471 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
2472 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2473 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2474 #ifdef COMPAT_12
2475 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
2476 #endif /* COMPAT_12 */
2477 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
2478 { NULL, IGN, IGN, 0 }
2479 };
2480
2481 void
arm7_setup(char * args)2482 arm7_setup(char *args)
2483 {
2484
2485 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2486 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2487 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2488 #if 0
2489 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2490 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2491 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2492 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
2493 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
2494 | CPU_CONTROL_AFLT_ENABLE;
2495 #endif
2496
2497 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2498 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2499 #endif
2500
2501 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2502 cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);
2503
2504 #ifdef __ARMEB__
2505 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2506 #endif
2507
2508 /* Clear out the cache */
2509 cpu_idcache_wbinv_all();
2510
2511 /* Set the control register */
2512 cpu_control(0xffffffff, cpuctrl);
2513 }
2514 #endif /* CPU_ARM7 */
2515
2516 #ifdef CPU_ARM7TDMI
2517 struct cpu_option arm7tdmi_options[] = {
2518 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
2519 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
2520 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2521 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2522 #ifdef COMPAT_12
2523 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
2524 #endif /* COMPAT_12 */
2525 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
2526 { NULL, IGN, IGN, 0 }
2527 };
2528
2529 void
arm7tdmi_setup(char * args)2530 arm7tdmi_setup(char *args)
2531 {
2532 int cpuctrl;
2533
2534 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2535 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2536 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2537
2538 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2539 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2540
2541 #ifdef __ARMEB__
2542 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2543 #endif
2544
2545 /* Clear out the cache */
2546 cpu_idcache_wbinv_all();
2547
2548 /* Set the control register */
2549 cpu_control(0xffffffff, cpuctrl);
2550 }
2551 #endif /* CPU_ARM7TDMI */
2552
2553 #ifdef CPU_ARM8
2554 struct cpu_option arm8_options[] = {
2555 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
2556 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
2557 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2558 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2559 #ifdef COMPAT_12
2560 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2561 #endif /* COMPAT_12 */
2562 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2563 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2564 { NULL, IGN, IGN, 0 }
2565 };
2566
2567 void
arm8_setup(char * args)2568 arm8_setup(char *args)
2569 {
2570 int integer;
2571 int clocktest;
2572 int setclock = 0;
2573
2574 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2575 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2576 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2577 #if 0
2578 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2579 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2580 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2581 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2582 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2583 #endif
2584
2585 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2586 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2587 #endif
2588
2589 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2590 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2591
2592 #ifdef __ARMEB__
2593 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2594 #endif
2595
2596 /* Get clock configuration */
2597 clocktest = arm8_clock_config(0, 0) & 0x0f;
2598
2599 /* Special ARM8 clock and test configuration */
2600 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2601 clocktest = 0;
2602 setclock = 1;
2603 }
2604 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2605 if (integer)
2606 clocktest |= 0x01;
2607 else
2608 clocktest &= ~(0x01);
2609 setclock = 1;
2610 }
2611 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2612 if (integer)
2613 clocktest |= 0x02;
2614 else
2615 clocktest &= ~(0x02);
2616 setclock = 1;
2617 }
2618 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2619 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
2620 setclock = 1;
2621 }
2622 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2623 clocktest |= (integer & 7) << 5;
2624 setclock = 1;
2625 }
2626
2627 /* Clear out the cache */
2628 cpu_idcache_wbinv_all();
2629
2630 /* Set the control register */
2631 cpu_control(0xffffffff, cpuctrl);
2632
2633 /* Set the clock/test register */
2634 if (setclock)
2635 arm8_clock_config(0x7f, clocktest);
2636 }
2637 #endif /* CPU_ARM8 */
2638
2639 #ifdef CPU_ARM9
2640 struct cpu_option arm9_options[] = {
2641 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2642 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2643 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2644 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2645 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2646 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2647 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2648 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2649 { NULL, IGN, IGN, 0 }
2650 };
2651
2652 void
arm9_setup(char * args)2653 arm9_setup(char *args)
2654 {
2655
2656 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2657 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2658 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2659 | CPU_CONTROL_WBUF_ENABLE;
2660 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2661 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2662 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2663 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2664 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2665 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2666 | CPU_CONTROL_ROUNDROBIN;
2667
2668 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2669 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2670 #endif
2671
2672 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2673
2674 #ifdef __ARMEB__
2675 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2676 #endif
2677
2678 #ifndef ARM_HAS_VBAR
2679 if (vector_page == ARM_VECTORS_HIGH)
2680 cpuctrl |= CPU_CONTROL_VECRELOC;
2681 #endif
2682
2683 /* Clear out the cache */
2684 cpu_idcache_wbinv_all();
2685
2686 /* Set the control register */
2687 cpu_control(cpuctrlmask, cpuctrl);
2688
2689 }
2690 #endif /* CPU_ARM9 */
2691
2692 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
2693 struct cpu_option arm10_options[] = {
2694 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2695 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2696 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2697 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2698 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2699 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2700 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2701 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2702 { NULL, IGN, IGN, 0 }
2703 };
2704
2705 void
arm10_setup(char * args)2706 arm10_setup(char *args)
2707 {
2708
2709 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2710 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2711 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2712 #if 0
2713 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2714 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2715 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2716 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2717 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_VECRELOC
2718 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2719 #endif
2720
2721 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2722 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2723 #endif
2724
2725 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2726
2727 #ifdef __ARMEB__
2728 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2729 #endif
2730
2731 #ifndef ARM_HAS_VBAR
2732 if (vector_page == ARM_VECTORS_HIGH)
2733 cpuctrl |= CPU_CONTROL_VECRELOC;
2734 #endif
2735
2736 /* Clear out the cache */
2737 cpu_idcache_wbinv_all();
2738
2739 /* Now really make sure they are clean. */
2740 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2741
2742 /* Set the control register */
2743 cpu_control(0xffffffff, cpuctrl);
2744
2745 /* And again. */
2746 cpu_idcache_wbinv_all();
2747 }
2748 #endif /* CPU_ARM9E || CPU_ARM10 */
2749
2750 #if defined(CPU_ARM11)
2751 struct cpu_option arm11_options[] = {
2752 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2753 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2754 { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2755 { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2756 { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2757 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2758 { "arm11.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2759 { NULL, IGN, IGN, 0 }
2760 };
2761
2762 void
arm11_setup(char * args)2763 arm11_setup(char *args)
2764 {
2765
2766 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2767 #ifdef ARM_MMU_EXTENDED
2768 | CPU_CONTROL_XP_ENABLE
2769 #endif
2770 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2771 /* | CPU_CONTROL_BPRD_ENABLE */;
2772
2773 #ifdef __ARMEB__
2774 cpuctrl |= CPU_CONTROL_EX_BEND;
2775 #endif
2776
2777 int cpuctrlmask = cpuctrl
2778 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
2779 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2780 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK
2781 | CPU_CONTROL_VECRELOC;
2782
2783 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2784 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2785 #endif
2786
2787 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2788
2789 #ifndef ARM_HAS_VBAR
2790 if (vector_page == ARM_VECTORS_HIGH)
2791 cpuctrl |= CPU_CONTROL_VECRELOC;
2792 #endif
2793
2794 /* Clear out the cache */
2795 cpu_idcache_wbinv_all();
2796
2797 /* Now really make sure they are clean. */
2798 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2799
2800 /* Allow detection code to find the VFP if it's fitted. */
2801 armreg_cpacr_write(0x0fffffff);
2802
2803 /* Set the control register */
2804 cpu_control(cpuctrlmask, cpuctrl);
2805
2806 /* And again. */
2807 cpu_idcache_wbinv_all();
2808 }
2809 #endif /* CPU_ARM11 */
2810
2811 #if defined(CPU_ARM11MPCORE)
2812
2813 void
arm11mpcore_setup(char * args)2814 arm11mpcore_setup(char *args)
2815 {
2816
2817 int cpuctrl = CPU_CONTROL_IC_ENABLE
2818 | CPU_CONTROL_DC_ENABLE
2819 #ifdef ARM_MMU_EXTENDED
2820 | CPU_CONTROL_XP_ENABLE
2821 #endif
2822 | CPU_CONTROL_BPRD_ENABLE ;
2823
2824 #ifdef __ARMEB__
2825 cpuctrl |= CPU_CONTROL_EX_BEND;
2826 #endif
2827
2828 int cpuctrlmask = cpuctrl
2829 | CPU_CONTROL_AFLT_ENABLE
2830 | CPU_CONTROL_VECRELOC;
2831
2832 #ifdef ARM11MPCORE_MMU_COMPAT
2833 /* XXX: S and R? */
2834 #endif
2835
2836 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2837 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2838 #endif
2839
2840 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2841
2842 #ifndef ARM_HAS_VBAR
2843 if (vector_page == ARM_VECTORS_HIGH)
2844 cpuctrl |= CPU_CONTROL_VECRELOC;
2845 #endif
2846
2847 /* Clear out the cache */
2848 cpu_idcache_wbinv_all();
2849
2850 /* Now really make sure they are clean. */
2851 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2852
2853 /* Allow detection code to find the VFP if it's fitted. */
2854 armreg_cpacr_write(0x0fffffff);
2855
2856 /* Set the control register */
2857 cpu_control(cpuctrlmask, cpuctrl);
2858
2859 /* And again. */
2860 cpu_idcache_wbinv_all();
2861 }
2862 #endif /* CPU_ARM11MPCORE */
2863
2864 #ifdef CPU_PJ4B
2865 void
pj4bv7_setup(char * args)2866 pj4bv7_setup(char *args)
2867 {
2868 int cpuctrl;
2869
2870 pj4b_config();
2871
2872 cpuctrl = CPU_CONTROL_MMU_ENABLE;
2873 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2874 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2875 #else
2876 cpuctrl |= CPU_CONTROL_UNAL_ENABLE;
2877 #endif
2878 cpuctrl |= CPU_CONTROL_DC_ENABLE;
2879 cpuctrl |= CPU_CONTROL_IC_ENABLE;
2880 cpuctrl |= (0xf << 3);
2881 cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
2882 cpuctrl |= (0x5 << 16);
2883 cpuctrl |= CPU_CONTROL_XP_ENABLE;
2884
2885 #ifndef ARM_HAS_VBAR
2886 if (vector_page == ARM_VECTORS_HIGH)
2887 cpuctrl |= CPU_CONTROL_VECRELOC;
2888 #endif
2889
2890 #ifdef L2CACHE_ENABLE
2891 /* Setup L2 cache */
2892 arm_scache.cache_type = CPU_CT_CTYPE_WT;
2893 arm_scache.cache_unified = 1;
2894 arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
2895 arm_scache.dcache_size = arm_scache.icache_size = ARMADAXP_L2_SIZE;
2896 arm_scache.dcache_ways = arm_scache.icache_ways = ARMADAXP_L2_WAYS;
2897 arm_scache.dcache_way_size = arm_scache.icache_way_size =
2898 ARMADAXP_L2_WAY_SIZE;
2899 arm_scache.dcache_line_size = arm_scache.icache_line_size =
2900 ARMADAXP_L2_LINE_SIZE;
2901 arm_scache.dcache_sets = arm_scache.icache_sets =
2902 ARMADAXP_L2_SETS;
2903
2904 cpufuncs.cf_sdcache_wbinv_range = armadaxp_sdcache_wbinv_range;
2905 cpufuncs.cf_sdcache_inv_range = armadaxp_sdcache_inv_range;
2906 cpufuncs.cf_sdcache_wb_range = armadaxp_sdcache_wb_range;
2907 #endif
2908
2909 #ifdef AURORA_IO_CACHE_COHERENCY
2910 /* use AMBA and I/O Coherency Fabric to maintain cache */
2911 cpufuncs.cf_dcache_wbinv_range = pj4b_dcache_cfu_wbinv_range;
2912 cpufuncs.cf_dcache_inv_range = pj4b_dcache_cfu_inv_range;
2913 cpufuncs.cf_dcache_wb_range = pj4b_dcache_cfu_wb_range;
2914
2915 cpufuncs.cf_sdcache_wbinv_range = (void *)cpufunc_nullop;
2916 cpufuncs.cf_sdcache_inv_range = (void *)cpufunc_nullop;
2917 cpufuncs.cf_sdcache_wb_range = (void *)cpufunc_nullop;
2918 #endif
2919
2920 /* Clear out the cache */
2921 cpu_idcache_wbinv_all();
2922
2923 /* Set the control register */
2924 cpu_control(0xffffffff, cpuctrl);
2925
2926 /* And again. */
2927 cpu_idcache_wbinv_all();
2928 #ifdef L2CACHE_ENABLE
2929 armadaxp_sdcache_wbinv_all();
2930 #endif
2931 }
2932 #endif /* CPU_PJ4B */
2933
2934 #if defined(CPU_ARMV7)
2935 struct cpu_option armv7_options[] = {
2936 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2937 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2938 { "armv7.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2939 { "armv7.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2940 { "armv7.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2941 { NULL, IGN, IGN, 0}
2942 };
2943
2944 void
armv7_setup(char * args)2945 armv7_setup(char *args)
2946 {
2947 int cpuctrl =
2948 CPU_CONTROL_MMU_ENABLE |
2949 CPU_CONTROL_IC_ENABLE |
2950 CPU_CONTROL_DC_ENABLE |
2951 CPU_CONTROL_BPRD_ENABLE |
2952 CPU_CONTROL_UNAL_ENABLE |
2953 0;
2954 #ifdef __ARMEB__
2955 cpuctrl |= CPU_CONTROL_EX_BEND;
2956 #endif
2957 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2958 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2959 #endif
2960 #ifdef ARM_MMU_EXTENDED
2961 cpuctrl |= CPU_CONTROL_XP_ENABLE;
2962 #endif
2963
2964 int cpuctrlmask = cpuctrl |
2965 CPU_CONTROL_EX_BEND |
2966 CPU_CONTROL_AFLT_ENABLE |
2967 CPU_CONTROL_TR_ENABLE |
2968 CPU_CONTROL_VECRELOC |
2969 CPU_CONTROL_XP_ENABLE |
2970 0;
2971
2972 cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
2973
2974 #ifndef ARM_HAS_VBAR
2975 if (vector_page == ARM_VECTORS_HIGH)
2976 cpuctrl |= CPU_CONTROL_VECRELOC;
2977 #endif
2978
2979 #ifdef __HAVE_GENERIC_START
2980 const u_int lcputype = cpufunc_id();
2981 int actlr_set = 0;
2982 int actlr_clr = 0;
2983
2984 if (CPU_ID_CORTEX_A5_P(lcputype)) {
2985 /*
2986 * Disable exclusive L1/L2 cache control
2987 * Enable SMP mode
2988 * Enable Cache and TLB maintenance broadcast
2989 */
2990 actlr_clr = CORTEXA5_ACTLR_EXCL;
2991 actlr_set = CORTEXA5_ACTLR_SMP | CORTEXA5_ACTLR_FW;
2992 } else if (CPU_ID_CORTEX_A7_P(lcputype)) {
2993 #ifdef MULTIPROCESSOR
2994 actlr_set |= CORTEXA7_ACTLR_SMP;
2995 #endif
2996 } else if (CPU_ID_CORTEX_A8_P(lcputype)) {
2997 actlr_set = CORTEXA8_ACTLR_L2EN;
2998 actlr_clr = CORTEXA8_ACTLR_L1ALIAS;
2999 } else if (CPU_ID_CORTEX_A9_P(lcputype)) {
3000 actlr_set =
3001 CORTEXA9_AUXCTL_FW |
3002 CORTEXA9_AUXCTL_L2PE | // Not in FreeBSD
3003 CORTEXA9_AUXCTL_SMP |
3004 0;
3005 } else if (CPU_ID_CORTEX_A15_P(lcputype)) {
3006 actlr_set =
3007 CORTEXA15_ACTLR_SMP |
3008 CORTEXA15_ACTLR_SDEH |
3009 0;
3010 } else if (CPU_ID_CORTEX_A12_P(lcputype) ||
3011 CPU_ID_CORTEX_A17_P(lcputype)) {
3012 actlr_set = CORTEXA17_ACTLR_SMP;
3013 uint32_t diagset = 0;
3014 const uint16_t varrev =
3015 __SHIFTIN(__SHIFTOUT(lcputype, CPU_ID_VARIANT_MASK), __BITS(7,4)) |
3016 __SHIFTIN(__SHIFTOUT(lcputype, CPU_ID_REVISION_MASK), __BITS(3,0)) |
3017 0;
3018 /* Errata 852421 exists upto r1p2 */
3019 if (varrev < 0x12) {
3020 diagset |= __BIT(24);
3021 }
3022 /* Errata 852423 exists upto r1p2 */
3023 if (varrev < 0x12) {
3024 diagset |= __BIT(12);
3025 }
3026 /* Errata 857272 */
3027 diagset |= __BITS(11,10);
3028
3029 const uint32_t dgnctlr1 = armreg_dgnctlr1_read();
3030 armreg_dgnctlr1_write(dgnctlr1 | diagset);
3031 } else if (CPU_ID_CORTEX_A53_P(lcputype)) {
3032 } else if (CPU_ID_CORTEX_A57_P(lcputype)) {
3033 } else if (CPU_ID_CORTEX_A72_P(lcputype)) {
3034 }
3035
3036 uint32_t actlr = armreg_auxctl_read();
3037 actlr &= ~actlr_clr;
3038 actlr |= actlr_set;
3039
3040 armreg_auxctl_write(actlr);
3041
3042 /* Set the control register - does dsb; isb */
3043 cpu_control(cpuctrlmask, cpuctrl);
3044
3045 /* does tlb and branch predictor flush, and dsb; isb */
3046 cpu_tlb_flushID();
3047 #else
3048 /* Set the control register - does dsb; isb */
3049 cpu_control(cpuctrlmask, cpuctrl);
3050 #endif
3051
3052 }
3053 #endif /* CPU_ARMV7 */
3054
3055
3056 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
3057 void
arm11x6_setup(char * args)3058 arm11x6_setup(char *args)
3059 {
3060 int cpuctrl, cpuctrl_wax;
3061 uint32_t auxctrl;
3062 uint32_t sbz=0;
3063 uint32_t cpuid;
3064
3065 cpuid = cpu_idnum();
3066
3067 cpuctrl =
3068 CPU_CONTROL_MMU_ENABLE |
3069 CPU_CONTROL_DC_ENABLE |
3070 CPU_CONTROL_WBUF_ENABLE |
3071 CPU_CONTROL_32BP_ENABLE |
3072 CPU_CONTROL_32BD_ENABLE |
3073 CPU_CONTROL_LABT_ENABLE |
3074 CPU_CONTROL_UNAL_ENABLE |
3075 #ifdef ARM_MMU_EXTENDED
3076 CPU_CONTROL_XP_ENABLE |
3077 #else
3078 CPU_CONTROL_SYST_ENABLE |
3079 #endif
3080 CPU_CONTROL_IC_ENABLE;
3081
3082 #ifdef __ARMEB__
3083 cpuctrl |= CPU_CONTROL_EX_BEND;
3084 #endif
3085
3086 /*
3087 * "write as existing" bits
3088 * inverse of this is mask
3089 */
3090 cpuctrl_wax =
3091 (3 << 30) |
3092 (1 << 29) |
3093 (1 << 28) |
3094 (3 << 26) |
3095 (3 << 19) |
3096 (1 << 17);
3097
3098 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3099 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3100 #endif
3101
3102 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
3103
3104 #ifndef ARM_HAS_VBAR
3105 if (vector_page == ARM_VECTORS_HIGH)
3106 cpuctrl |= CPU_CONTROL_VECRELOC;
3107 #endif
3108
3109 auxctrl = armreg_auxctl_read();
3110 /*
3111 * This options enables the workaround for the 364296 ARM1136
3112 * r0pX errata (possible cache data corruption with
3113 * hit-under-miss enabled). It sets the undocumented bit 31 in
3114 * the auxiliary control register and the FI bit in the control
3115 * register, thus disabling hit-under-miss without putting the
3116 * processor into full low interrupt latency mode. ARM11MPCore
3117 * is not affected.
3118 */
3119 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
3120 cpuctrl |= CPU_CONTROL_FI_ENABLE;
3121 auxctrl |= ARM1136_AUXCTL_PFI;
3122 }
3123
3124 /*
3125 * This enables the workaround for the following ARM1176 r0pX
3126 * errata.
3127 *
3128 * 394601: In low interrupt latency configuration, interrupted clean
3129 * and invalidate operation may not clean dirty data.
3130 *
3131 * 716151: Clean Data Cache line by MVA can corrupt subsequent
3132 * stores to the same cache line.
3133 *
3134 * 714068: Prefetch Instruction Cache Line or Invalidate Instruction
3135 * Cache Line by MVA can cause deadlock.
3136 */
3137 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
3138 /* 394601 and 716151 */
3139 cpuctrl |= CPU_CONTROL_FI_ENABLE;
3140 auxctrl |= ARM1176_AUXCTL_FIO;
3141
3142 /* 714068 */
3143 auxctrl |= ARM1176_AUXCTL_PHD;
3144 }
3145
3146 /* Clear out the cache */
3147 cpu_idcache_wbinv_all();
3148
3149 /* Now really make sure they are clean. */
3150 __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
3151
3152 /* Allow detection code to find the VFP if it's fitted. */
3153 armreg_cpacr_write(0x0fffffff);
3154
3155 /* Set the control register */
3156 cpu_control(~cpuctrl_wax, cpuctrl);
3157
3158 /* Update auxctlr */
3159 armreg_auxctl_write(auxctrl);
3160
3161 /* And again. */
3162 cpu_idcache_wbinv_all();
3163 }
3164 #endif /* CPU_ARM1136 || CPU_ARM1176 */
3165
3166 #ifdef CPU_SA110
3167 struct cpu_option sa110_options[] = {
3168 #ifdef COMPAT_12
3169 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3170 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3171 #endif /* COMPAT_12 */
3172 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3173 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3174 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3175 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
3176 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
3177 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3178 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
3179 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3180 { NULL, IGN, IGN, 0 }
3181 };
3182
3183 void
sa110_setup(char * args)3184 sa110_setup(char *args)
3185 {
3186 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3187 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3188 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3189 | CPU_CONTROL_WBUF_ENABLE;
3190 #if 0
3191 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3192 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3193 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3194 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3195 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3196 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3197 | CPU_CONTROL_CPCLK;
3198 #endif
3199
3200 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3201 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3202 #endif
3203
3204 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
3205
3206 #ifdef __ARMEB__
3207 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3208 #endif
3209
3210 #ifndef ARM_HAS_VBAR
3211 if (vector_page == ARM_VECTORS_HIGH)
3212 cpuctrl |= CPU_CONTROL_VECRELOC;
3213 #endif
3214
3215 /* Clear out the cache */
3216 cpu_idcache_wbinv_all();
3217
3218 /* Set the control register */
3219 #if 0
3220 cpu_control(cpuctrlmask, cpuctrl);
3221 #endif
3222 cpu_control(0xffffffff, cpuctrl);
3223
3224 /*
3225 * enable clockswitching, note that this doesn't read or write to r0,
3226 * r0 is just to make it valid asm
3227 */
3228 __asm volatile ("mcr p15, 0, r0, c15, c1, 2");
3229 }
3230 #endif /* CPU_SA110 */
3231
3232 #if defined(CPU_SA1100) || defined(CPU_SA1110)
3233 struct cpu_option sa11x0_options[] = {
3234 #ifdef COMPAT_12
3235 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3236 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3237 #endif /* COMPAT_12 */
3238 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3239 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3240 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3241 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
3242 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
3243 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3244 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
3245 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3246 { NULL, IGN, IGN, 0 }
3247 };
3248
3249 void
sa11x0_setup(char * args)3250 sa11x0_setup(char *args)
3251 {
3252
3253 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3254 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3255 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3256 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3257 #if 0
3258 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3259 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3260 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3261 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3262 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3263 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3264 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3265 #endif
3266
3267 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3268 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3269 #endif
3270
3271 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
3272
3273 #ifdef __ARMEB__
3274 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3275 #endif
3276
3277 #ifndef ARM_HAS_VBAR
3278 if (vector_page == ARM_VECTORS_HIGH)
3279 cpuctrl |= CPU_CONTROL_VECRELOC;
3280 #endif
3281
3282 /* Clear out the cache */
3283 cpu_idcache_wbinv_all();
3284
3285 /* Set the control register */
3286 cpu_control(0xffffffff, cpuctrl);
3287 }
3288 #endif /* CPU_SA1100 || CPU_SA1110 */
3289
3290 #if defined(CPU_FA526)
3291 struct cpu_option fa526_options[] = {
3292 #ifdef COMPAT_12
3293 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3294 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
3295 #endif /* COMPAT_12 */
3296 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3297 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3298 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3299 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
3300 { NULL, IGN, IGN, 0 }
3301 };
3302
3303 void
fa526_setup(char * args)3304 fa526_setup(char *args)
3305 {
3306
3307 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3308 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3309 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3310 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
3311 #if 0
3312 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3313 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3314 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3315 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3316 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3317 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3318 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3319 #endif
3320
3321 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3322 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3323 #endif
3324
3325 cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
3326
3327 #ifdef __ARMEB__
3328 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3329 #endif
3330
3331 #ifndef ARM_HAS_VBAR
3332 if (vector_page == ARM_VECTORS_HIGH)
3333 cpuctrl |= CPU_CONTROL_VECRELOC;
3334 #endif
3335
3336 /* Clear out the cache */
3337 cpu_idcache_wbinv_all();
3338
3339 /* Set the control register */
3340 cpu_control(0xffffffff, cpuctrl);
3341 }
3342 #endif /* CPU_FA526 */
3343
3344 #if defined(CPU_IXP12X0)
3345 struct cpu_option ixp12x0_options[] = {
3346 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3347 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3348 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3349 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
3350 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
3351 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3352 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
3353 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3354 { NULL, IGN, IGN, 0 }
3355 };
3356
3357 void
ixp12x0_setup(char * args)3358 ixp12x0_setup(char *args)
3359 {
3360
3361 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
3362 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
3363 | CPU_CONTROL_IC_ENABLE;
3364
3365 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
3366 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
3367 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
3368 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
3369 | CPU_CONTROL_VECRELOC;
3370
3371 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3372 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3373 #endif
3374
3375 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
3376
3377 #ifdef __ARMEB__
3378 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3379 #endif
3380
3381 #ifndef ARM_HAS_VBAR
3382 if (vector_page == ARM_VECTORS_HIGH)
3383 cpuctrl |= CPU_CONTROL_VECRELOC;
3384 #endif
3385
3386 /* Clear out the cache */
3387 cpu_idcache_wbinv_all();
3388
3389 /* Set the control register */
3390 /* cpu_control(0xffffffff, cpuctrl); */
3391 cpu_control(cpuctrlmask, cpuctrl);
3392 }
3393 #endif /* CPU_IXP12X0 */
3394
3395 #if defined(CPU_XSCALE)
3396 struct cpu_option xscale_options[] = {
3397 #ifdef COMPAT_12
3398 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
3399 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3400 #endif /* COMPAT_12 */
3401 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
3402 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3403 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3404 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
3405 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3406 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
3407 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
3408 { NULL, IGN, IGN, 0 }
3409 };
3410
3411 void
xscale_setup(char * args)3412 xscale_setup(char *args)
3413 {
3414 uint32_t auxctl;
3415
3416 /*
3417 * The XScale Write Buffer is always enabled. Our option
3418 * is to enable/disable coalescing. Note that bits 6:3
3419 * must always be enabled.
3420 */
3421
3422 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3423 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3424 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3425 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
3426 | CPU_CONTROL_BPRD_ENABLE;
3427 #if 0
3428 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
3429 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
3430 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3431 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3432 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3433 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
3434 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
3435 #endif
3436
3437 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3438 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3439 #endif
3440
3441 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
3442
3443 #ifdef __ARMEB__
3444 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3445 #endif
3446
3447 #ifndef ARM_HAS_VBAR
3448 if (vector_page == ARM_VECTORS_HIGH)
3449 cpuctrl |= CPU_CONTROL_VECRELOC;
3450 #endif
3451
3452 /* Clear out the cache */
3453 cpu_idcache_wbinv_all();
3454
3455 /*
3456 * Set the control register. Note that bits 6:3 must always
3457 * be set to 1.
3458 */
3459 #if 0
3460 cpu_control(cpuctrlmask, cpuctrl);
3461 #endif
3462 cpu_control(0xffffffff, cpuctrl);
3463
3464 /* Make sure write coalescing is turned on */
3465 auxctl = armreg_auxctl_read();
3466 #ifdef XSCALE_NO_COALESCE_WRITES
3467 auxctl |= XSCALE_AUXCTL_K;
3468 #else
3469 auxctl &= ~XSCALE_AUXCTL_K;
3470 #endif
3471 armreg_auxctl_write(auxctl);
3472 }
3473 #endif /* CPU_XSCALE */
3474
3475 #if defined(CPU_SHEEVA)
3476 struct cpu_option sheeva_options[] = {
3477 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3478 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3479 { "sheeva.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3480 { "sheeva.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
3481 { "sheeva.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
3482 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3483 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
3484 { "sheeva.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
3485 { NULL, IGN, IGN, 0 }
3486 };
3487
3488 void
sheeva_setup(char * args)3489 sheeva_setup(char *args)
3490 {
3491 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3492 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3493 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
3494 #if 0
3495 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3496 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3497 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
3498 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3499 | CPU_CONTROL_BPRD_ENABLE
3500 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
3501 #endif
3502
3503 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3504 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3505 #endif
3506
3507 cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl);
3508
3509 /* Enable DCache Streaming Switch and Write Allocate */
3510 uint32_t sheeva_ext = armreg_sheeva_xctrl_read();
3511
3512 sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN;
3513 #ifdef SHEEVA_L2_CACHE
3514 sheeva_ext |= FC_L2CACHE_EN;
3515 sheeva_ext &= ~FC_L2_PREF_DIS;
3516 #endif
3517
3518 armreg_sheeva_xctrl_write(sheeva_ext);
3519
3520 #ifdef SHEEVA_L2_CACHE
3521 #ifndef SHEEVA_L2_CACHE_WT
3522 arm_scache.cache_type = CPU_CT_CTYPE_WB2;
3523 #elif CPU_CT_CTYPE_WT != 0
3524 arm_scache.cache_type = CPU_CT_CTYPE_WT;
3525 #endif
3526 arm_scache.cache_unified = 1;
3527 arm_scache.dcache_type = arm_scache.icache_type = CACHE_TYPE_PIPT;
3528 arm_scache.dcache_size = arm_scache.icache_size = 256*1024;
3529 arm_scache.dcache_ways = arm_scache.icache_ways = 4;
3530 arm_scache.dcache_way_size = arm_scache.icache_way_size =
3531 arm_scache.dcache_size / arm_scache.dcache_ways;
3532 arm_scache.dcache_line_size = arm_scache.icache_line_size = 32;
3533 arm_scache.dcache_sets = arm_scache.icache_sets =
3534 arm_scache.dcache_way_size / arm_scache.dcache_line_size;
3535
3536 cpufuncs.cf_sdcache_wb_range = sheeva_sdcache_wb_range;
3537 cpufuncs.cf_sdcache_inv_range = sheeva_sdcache_inv_range;
3538 cpufuncs.cf_sdcache_wbinv_range = sheeva_sdcache_wbinv_range;
3539 #endif /* SHEEVA_L2_CACHE */
3540
3541 #ifdef __ARMEB__
3542 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3543 #endif
3544
3545 #ifndef ARM_HAS_VBAR
3546 if (vector_page == ARM_VECTORS_HIGH)
3547 cpuctrl |= CPU_CONTROL_VECRELOC;
3548 #endif
3549
3550 /* Clear out the cache */
3551 cpu_idcache_wbinv_all();
3552
3553 /* Now really make sure they are clean. */
3554 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
3555
3556 /* Set the control register */
3557 cpu_control(0xffffffff, cpuctrl);
3558
3559 /* And again. */
3560 cpu_idcache_wbinv_all();
3561 #ifdef SHEEVA_L2_CACHE
3562 sheeva_sdcache_wbinv_all();
3563 #endif
3564 }
3565 #endif /* CPU_SHEEVA */
3566
3567 bool
cpu_gtmr_exists_p(void)3568 cpu_gtmr_exists_p(void)
3569 {
3570 return armreg_pfr1_read() & ARM_PFR1_GTIMER_MASK;
3571 }
3572
3573 u_int
cpu_clusterid(void)3574 cpu_clusterid(void)
3575 {
3576 return __SHIFTOUT(armreg_mpidr_read(), MPIDR_AFF1);
3577 }
3578
3579 bool
cpu_earlydevice_va_p(void)3580 cpu_earlydevice_va_p(void)
3581 {
3582 const bool mmu_enabled_p =
3583 armreg_sctlr_read() & CPU_CONTROL_MMU_ENABLE;
3584
3585 if (!mmu_enabled_p)
3586 return false;
3587
3588 /* Don't access cpu_ttb unless the mmu is enabled */
3589 const bool cpul1pt_p =
3590 ((armreg_ttbr_read() & -L1_TABLE_SIZE) == cpu_ttb) ||
3591 ((armreg_ttbr1_read() & -L1_TABLE_SIZE) == cpu_ttb);
3592
3593 return cpul1pt_p;
3594 }
3595