1 /* $NetBSD: cpufunc.c,v 1.54 2002/08/20 02:30:51 briggs Exp $ */ 2 3 /* 4 * arm7tdmi support code Copyright (c) 2001 John Fremlin 5 * arm8 support code Copyright (c) 1997 ARM Limited 6 * arm8 support code Copyright (c) 1997 Causality Limited 7 * arm9 support code Copyright (C) 2001 ARM Ltd 8 * Copyright (c) 1997 Mark Brinicombe. 9 * Copyright (c) 1997 Causality Limited 10 * All rights reserved. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by Causality Limited. 23 * 4. The name of Causality Limited may not be used to endorse or promote 24 * products derived from this software without specific prior written 25 * permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS 28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 30 * DISCLAIMED. 
IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, 31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37 * SUCH DAMAGE. 38 * 39 * RiscBSD kernel project 40 * 41 * cpufuncs.c 42 * 43 * C functions for supporting CPU / MMU / TLB specific operations. 44 * 45 * Created : 30/01/97 46 */ 47 48 #include "opt_compat_netbsd.h" 49 #include "opt_cpuoptions.h" 50 #include "opt_perfctrs.h" 51 52 #include <sys/types.h> 53 #include <sys/param.h> 54 #include <sys/pmc.h> 55 #include <sys/systm.h> 56 #include <machine/cpu.h> 57 #include <machine/bootconfig.h> 58 #include <arch/arm/arm/disassem.h> 59 60 #include <uvm/uvm.h> 61 62 #include <arm/cpuconf.h> 63 #include <arm/cpufunc.h> 64 65 #ifdef CPU_XSCALE_80200 66 #include <arm/xscale/i80200reg.h> 67 #include <arm/xscale/i80200var.h> 68 #endif 69 70 #ifdef CPU_XSCALE_80321 71 #include <arm/xscale/i80321reg.h> 72 #include <arm/xscale/i80321var.h> 73 #endif 74 75 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) 76 #include <arm/xscale/xscalereg.h> 77 #endif 78 79 #if defined(PERFCTRS) 80 struct arm_pmc_funcs *arm_pmc; 81 #endif 82 83 /* PRIMARY CACHE VARIABLES */ 84 int arm_picache_size; 85 int arm_picache_line_size; 86 int arm_picache_ways; 87 88 int arm_pdcache_size; /* and unified */ 89 int arm_pdcache_line_size; 90 int arm_pdcache_ways; 91 92 int arm_pcache_type; 93 int arm_pcache_unified; 94 95 int arm_dcache_align; 96 int arm_dcache_align_mask; 97 98 /* 1 == use cpu_sleep(), 0 == don't */ 99 int cpu_do_powersave; 100 101 #ifdef CPU_ARM3 102 struct cpu_functions arm3_cpufuncs = { 103 /* CPU functions */ 104 105 
cpufunc_id, /* id */ 106 cpufunc_nullop, /* cpwait */ 107 108 /* MMU functions */ 109 110 arm3_control, /* control */ 111 NULL, /* domain */ 112 NULL, /* setttb */ 113 NULL, /* faultstatus */ 114 NULL, /* faultaddress */ 115 116 /* TLB functions */ 117 118 cpufunc_nullop, /* tlb_flushID */ 119 (void *)cpufunc_nullop, /* tlb_flushID_SE */ 120 cpufunc_nullop, /* tlb_flushI */ 121 (void *)cpufunc_nullop, /* tlb_flushI_SE */ 122 cpufunc_nullop, /* tlb_flushD */ 123 (void *)cpufunc_nullop, /* tlb_flushD_SE */ 124 125 /* Cache operations */ 126 127 cpufunc_nullop, /* icache_sync_all */ 128 (void *) cpufunc_nullop, /* icache_sync_range */ 129 130 arm3_cache_flush, /* dcache_wbinv_all */ 131 (void *)arm3_cache_flush, /* dcache_wbinv_range */ 132 (void *)arm3_cache_flush, /* dcache_inv_range */ 133 (void *)cpufunc_nullop, /* dcache_wb_range */ 134 135 arm3_cache_flush, /* idcache_wbinv_all */ 136 (void *)arm3_cache_flush, /* idcache_wbinv_range */ 137 138 /* Other functions */ 139 140 cpufunc_nullop, /* flush_prefetchbuf */ 141 cpufunc_nullop, /* drain_writebuf */ 142 cpufunc_nullop, /* flush_brnchtgt_C */ 143 (void *)cpufunc_nullop, /* flush_brnchtgt_E */ 144 145 (void *)cpufunc_nullop, /* sleep */ 146 147 /* Soft functions */ 148 149 early_abort_fixup, /* dataabt_fixup */ 150 cpufunc_null_fixup, /* prefetchabt_fixup */ 151 152 NULL, /* context_switch */ 153 154 (void *)cpufunc_nullop /* cpu setup */ 155 156 }; 157 #endif /* CPU_ARM3 */ 158 159 #ifdef CPU_ARM6 160 struct cpu_functions arm6_cpufuncs = { 161 /* CPU functions */ 162 163 cpufunc_id, /* id */ 164 cpufunc_nullop, /* cpwait */ 165 166 /* MMU functions */ 167 168 cpufunc_control, /* control */ 169 cpufunc_domains, /* domain */ 170 arm67_setttb, /* setttb */ 171 cpufunc_faultstatus, /* faultstatus */ 172 cpufunc_faultaddress, /* faultaddress */ 173 174 /* TLB functions */ 175 176 arm67_tlb_flush, /* tlb_flushID */ 177 arm67_tlb_purge, /* tlb_flushID_SE */ 178 arm67_tlb_flush, /* tlb_flushI */ 179 arm67_tlb_purge, 
/* tlb_flushI_SE */ 180 arm67_tlb_flush, /* tlb_flushD */ 181 arm67_tlb_purge, /* tlb_flushD_SE */ 182 183 /* Cache operations */ 184 185 cpufunc_nullop, /* icache_sync_all */ 186 (void *) cpufunc_nullop, /* icache_sync_range */ 187 188 arm67_cache_flush, /* dcache_wbinv_all */ 189 (void *)arm67_cache_flush, /* dcache_wbinv_range */ 190 (void *)arm67_cache_flush, /* dcache_inv_range */ 191 (void *)cpufunc_nullop, /* dcache_wb_range */ 192 193 arm67_cache_flush, /* idcache_wbinv_all */ 194 (void *)arm67_cache_flush, /* idcache_wbinv_range */ 195 196 /* Other functions */ 197 198 cpufunc_nullop, /* flush_prefetchbuf */ 199 cpufunc_nullop, /* drain_writebuf */ 200 cpufunc_nullop, /* flush_brnchtgt_C */ 201 (void *)cpufunc_nullop, /* flush_brnchtgt_E */ 202 203 (void *)cpufunc_nullop, /* sleep */ 204 205 /* Soft functions */ 206 207 #ifdef ARM6_LATE_ABORT 208 late_abort_fixup, /* dataabt_fixup */ 209 #else 210 early_abort_fixup, /* dataabt_fixup */ 211 #endif 212 cpufunc_null_fixup, /* prefetchabt_fixup */ 213 214 arm67_context_switch, /* context_switch */ 215 216 arm6_setup /* cpu setup */ 217 218 }; 219 #endif /* CPU_ARM6 */ 220 221 #ifdef CPU_ARM7 222 struct cpu_functions arm7_cpufuncs = { 223 /* CPU functions */ 224 225 cpufunc_id, /* id */ 226 cpufunc_nullop, /* cpwait */ 227 228 /* MMU functions */ 229 230 cpufunc_control, /* control */ 231 cpufunc_domains, /* domain */ 232 arm67_setttb, /* setttb */ 233 cpufunc_faultstatus, /* faultstatus */ 234 cpufunc_faultaddress, /* faultaddress */ 235 236 /* TLB functions */ 237 238 arm67_tlb_flush, /* tlb_flushID */ 239 arm67_tlb_purge, /* tlb_flushID_SE */ 240 arm67_tlb_flush, /* tlb_flushI */ 241 arm67_tlb_purge, /* tlb_flushI_SE */ 242 arm67_tlb_flush, /* tlb_flushD */ 243 arm67_tlb_purge, /* tlb_flushD_SE */ 244 245 /* Cache operations */ 246 247 cpufunc_nullop, /* icache_sync_all */ 248 (void *)cpufunc_nullop, /* icache_sync_range */ 249 250 arm67_cache_flush, /* dcache_wbinv_all */ 251 (void *)arm67_cache_flush, /* 
dcache_wbinv_range */ 252 (void *)arm67_cache_flush, /* dcache_inv_range */ 253 (void *)cpufunc_nullop, /* dcache_wb_range */ 254 255 arm67_cache_flush, /* idcache_wbinv_all */ 256 (void *)arm67_cache_flush, /* idcache_wbinv_range */ 257 258 /* Other functions */ 259 260 cpufunc_nullop, /* flush_prefetchbuf */ 261 cpufunc_nullop, /* drain_writebuf */ 262 cpufunc_nullop, /* flush_brnchtgt_C */ 263 (void *)cpufunc_nullop, /* flush_brnchtgt_E */ 264 265 (void *)cpufunc_nullop, /* sleep */ 266 267 /* Soft functions */ 268 269 late_abort_fixup, /* dataabt_fixup */ 270 cpufunc_null_fixup, /* prefetchabt_fixup */ 271 272 arm67_context_switch, /* context_switch */ 273 274 arm7_setup /* cpu setup */ 275 276 }; 277 #endif /* CPU_ARM7 */ 278 279 #ifdef CPU_ARM7TDMI 280 struct cpu_functions arm7tdmi_cpufuncs = { 281 /* CPU functions */ 282 283 cpufunc_id, /* id */ 284 cpufunc_nullop, /* cpwait */ 285 286 /* MMU functions */ 287 288 cpufunc_control, /* control */ 289 cpufunc_domains, /* domain */ 290 arm7tdmi_setttb, /* setttb */ 291 cpufunc_faultstatus, /* faultstatus */ 292 cpufunc_faultaddress, /* faultaddress */ 293 294 /* TLB functions */ 295 296 arm7tdmi_tlb_flushID, /* tlb_flushID */ 297 arm7tdmi_tlb_flushID_SE, /* tlb_flushID_SE */ 298 arm7tdmi_tlb_flushID, /* tlb_flushI */ 299 arm7tdmi_tlb_flushID_SE, /* tlb_flushI_SE */ 300 arm7tdmi_tlb_flushID, /* tlb_flushD */ 301 arm7tdmi_tlb_flushID_SE, /* tlb_flushD_SE */ 302 303 /* Cache operations */ 304 305 cpufunc_nullop, /* icache_sync_all */ 306 (void *)cpufunc_nullop, /* icache_sync_range */ 307 308 arm7tdmi_cache_flushID, /* dcache_wbinv_all */ 309 (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range */ 310 (void *)arm7tdmi_cache_flushID, /* dcache_inv_range */ 311 (void *)cpufunc_nullop, /* dcache_wb_range */ 312 313 arm7tdmi_cache_flushID, /* idcache_wbinv_all */ 314 (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range */ 315 316 /* Other functions */ 317 318 cpufunc_nullop, /* flush_prefetchbuf */ 319 
cpufunc_nullop, /* drain_writebuf */ 320 cpufunc_nullop, /* flush_brnchtgt_C */ 321 (void *)cpufunc_nullop, /* flush_brnchtgt_E */ 322 323 (void *)cpufunc_nullop, /* sleep */ 324 325 /* Soft functions */ 326 327 late_abort_fixup, /* dataabt_fixup */ 328 cpufunc_null_fixup, /* prefetchabt_fixup */ 329 330 arm7tdmi_context_switch, /* context_switch */ 331 332 arm7tdmi_setup /* cpu setup */ 333 334 }; 335 #endif /* CPU_ARM7TDMI */ 336 337 #ifdef CPU_ARM8 338 struct cpu_functions arm8_cpufuncs = { 339 /* CPU functions */ 340 341 cpufunc_id, /* id */ 342 cpufunc_nullop, /* cpwait */ 343 344 /* MMU functions */ 345 346 cpufunc_control, /* control */ 347 cpufunc_domains, /* domain */ 348 arm8_setttb, /* setttb */ 349 cpufunc_faultstatus, /* faultstatus */ 350 cpufunc_faultaddress, /* faultaddress */ 351 352 /* TLB functions */ 353 354 arm8_tlb_flushID, /* tlb_flushID */ 355 arm8_tlb_flushID_SE, /* tlb_flushID_SE */ 356 arm8_tlb_flushID, /* tlb_flushI */ 357 arm8_tlb_flushID_SE, /* tlb_flushI_SE */ 358 arm8_tlb_flushID, /* tlb_flushD */ 359 arm8_tlb_flushID_SE, /* tlb_flushD_SE */ 360 361 /* Cache operations */ 362 363 cpufunc_nullop, /* icache_sync_all */ 364 (void *)cpufunc_nullop, /* icache_sync_range */ 365 366 arm8_cache_purgeID, /* dcache_wbinv_all */ 367 (void *)arm8_cache_purgeID, /* dcache_wbinv_range */ 368 /*XXX*/ (void *)arm8_cache_purgeID, /* dcache_inv_range */ 369 (void *)arm8_cache_cleanID, /* dcache_wb_range */ 370 371 arm8_cache_purgeID, /* idcache_wbinv_all */ 372 (void *)arm8_cache_purgeID, /* idcache_wbinv_range */ 373 374 /* Other functions */ 375 376 cpufunc_nullop, /* flush_prefetchbuf */ 377 cpufunc_nullop, /* drain_writebuf */ 378 cpufunc_nullop, /* flush_brnchtgt_C */ 379 (void *)cpufunc_nullop, /* flush_brnchtgt_E */ 380 381 (void *)cpufunc_nullop, /* sleep */ 382 383 /* Soft functions */ 384 385 cpufunc_null_fixup, /* dataabt_fixup */ 386 cpufunc_null_fixup, /* prefetchabt_fixup */ 387 388 arm8_context_switch, /* context_switch */ 389 390 
arm8_setup /* cpu setup */ 391 }; 392 #endif /* CPU_ARM8 */ 393 394 #ifdef CPU_ARM9 395 struct cpu_functions arm9_cpufuncs = { 396 /* CPU functions */ 397 398 cpufunc_id, /* id */ 399 cpufunc_nullop, /* cpwait */ 400 401 /* MMU functions */ 402 403 cpufunc_control, /* control */ 404 cpufunc_domains, /* Domain */ 405 arm9_setttb, /* Setttb */ 406 cpufunc_faultstatus, /* Faultstatus */ 407 cpufunc_faultaddress, /* Faultaddress */ 408 409 /* TLB functions */ 410 411 armv4_tlb_flushID, /* tlb_flushID */ 412 arm9_tlb_flushID_SE, /* tlb_flushID_SE */ 413 armv4_tlb_flushI, /* tlb_flushI */ 414 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */ 415 armv4_tlb_flushD, /* tlb_flushD */ 416 armv4_tlb_flushD_SE, /* tlb_flushD_SE */ 417 418 /* Cache operations */ 419 420 arm9_cache_syncI, /* icache_sync_all */ 421 arm9_cache_syncI_rng, /* icache_sync_range */ 422 423 /* ...cache in write-though mode... */ 424 arm9_cache_flushD, /* dcache_wbinv_all */ 425 arm9_cache_flushD_rng, /* dcache_wbinv_range */ 426 arm9_cache_flushD_rng, /* dcache_inv_range */ 427 (void *)cpufunc_nullop, /* dcache_wb_range */ 428 429 arm9_cache_flushID, /* idcache_wbinv_all */ 430 arm9_cache_flushID_rng, /* idcache_wbinv_range */ 431 432 /* Other functions */ 433 434 cpufunc_nullop, /* flush_prefetchbuf */ 435 armv4_drain_writebuf, /* drain_writebuf */ 436 cpufunc_nullop, /* flush_brnchtgt_C */ 437 (void *)cpufunc_nullop, /* flush_brnchtgt_E */ 438 439 (void *)cpufunc_nullop, /* sleep */ 440 441 /* Soft functions */ 442 443 cpufunc_null_fixup, /* dataabt_fixup */ 444 cpufunc_null_fixup, /* prefetchabt_fixup */ 445 446 arm9_context_switch, /* context_switch */ 447 448 arm9_setup /* cpu setup */ 449 450 }; 451 #endif /* CPU_ARM9 */ 452 453 #ifdef CPU_SA110 454 struct cpu_functions sa110_cpufuncs = { 455 /* CPU functions */ 456 457 cpufunc_id, /* id */ 458 cpufunc_nullop, /* cpwait */ 459 460 /* MMU functions */ 461 462 cpufunc_control, /* control */ 463 cpufunc_domains, /* domain */ 464 sa1_setttb, /* setttb */ 
465 cpufunc_faultstatus, /* faultstatus */ 466 cpufunc_faultaddress, /* faultaddress */ 467 468 /* TLB functions */ 469 470 armv4_tlb_flushID, /* tlb_flushID */ 471 sa1_tlb_flushID_SE, /* tlb_flushID_SE */ 472 armv4_tlb_flushI, /* tlb_flushI */ 473 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */ 474 armv4_tlb_flushD, /* tlb_flushD */ 475 armv4_tlb_flushD_SE, /* tlb_flushD_SE */ 476 477 /* Cache operations */ 478 479 sa1_cache_syncI, /* icache_sync_all */ 480 sa1_cache_syncI_rng, /* icache_sync_range */ 481 482 sa1_cache_purgeD, /* dcache_wbinv_all */ 483 sa1_cache_purgeD_rng, /* dcache_wbinv_range */ 484 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */ 485 sa1_cache_cleanD_rng, /* dcache_wb_range */ 486 487 sa1_cache_purgeID, /* idcache_wbinv_all */ 488 sa1_cache_purgeID_rng, /* idcache_wbinv_range */ 489 490 /* Other functions */ 491 492 cpufunc_nullop, /* flush_prefetchbuf */ 493 armv4_drain_writebuf, /* drain_writebuf */ 494 cpufunc_nullop, /* flush_brnchtgt_C */ 495 (void *)cpufunc_nullop, /* flush_brnchtgt_E */ 496 497 (void *)cpufunc_nullop, /* sleep */ 498 499 /* Soft functions */ 500 501 cpufunc_null_fixup, /* dataabt_fixup */ 502 cpufunc_null_fixup, /* prefetchabt_fixup */ 503 504 sa110_context_switch, /* context_switch */ 505 506 sa110_setup /* cpu setup */ 507 }; 508 #endif /* CPU_SA110 */ 509 510 #if defined(CPU_SA1100) || defined(CPU_SA1110) 511 struct cpu_functions sa11x0_cpufuncs = { 512 /* CPU functions */ 513 514 cpufunc_id, /* id */ 515 cpufunc_nullop, /* cpwait */ 516 517 /* MMU functions */ 518 519 cpufunc_control, /* control */ 520 cpufunc_domains, /* domain */ 521 sa1_setttb, /* setttb */ 522 cpufunc_faultstatus, /* faultstatus */ 523 cpufunc_faultaddress, /* faultaddress */ 524 525 /* TLB functions */ 526 527 armv4_tlb_flushID, /* tlb_flushID */ 528 sa1_tlb_flushID_SE, /* tlb_flushID_SE */ 529 armv4_tlb_flushI, /* tlb_flushI */ 530 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */ 531 armv4_tlb_flushD, /* tlb_flushD */ 532 armv4_tlb_flushD_SE, 
/* tlb_flushD_SE */ 533 534 /* Cache operations */ 535 536 sa1_cache_syncI, /* icache_sync_all */ 537 sa1_cache_syncI_rng, /* icache_sync_range */ 538 539 sa1_cache_purgeD, /* dcache_wbinv_all */ 540 sa1_cache_purgeD_rng, /* dcache_wbinv_range */ 541 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */ 542 sa1_cache_cleanD_rng, /* dcache_wb_range */ 543 544 sa1_cache_purgeID, /* idcache_wbinv_all */ 545 sa1_cache_purgeID_rng, /* idcache_wbinv_range */ 546 547 /* Other functions */ 548 549 sa11x0_drain_readbuf, /* flush_prefetchbuf */ 550 armv4_drain_writebuf, /* drain_writebuf */ 551 cpufunc_nullop, /* flush_brnchtgt_C */ 552 (void *)cpufunc_nullop, /* flush_brnchtgt_E */ 553 554 sa11x0_cpu_sleep, /* sleep */ 555 556 /* Soft functions */ 557 558 cpufunc_null_fixup, /* dataabt_fixup */ 559 cpufunc_null_fixup, /* prefetchabt_fixup */ 560 561 sa11x0_context_switch, /* context_switch */ 562 563 sa11x0_setup /* cpu setup */ 564 }; 565 #endif /* CPU_SA1100 || CPU_SA1110 */ 566 567 #ifdef CPU_IXP12X0 568 struct cpu_functions ixp12x0_cpufuncs = { 569 /* CPU functions */ 570 571 cpufunc_id, /* id */ 572 cpufunc_nullop, /* cpwait */ 573 574 /* MMU functions */ 575 576 cpufunc_control, /* control */ 577 cpufunc_domains, /* domain */ 578 sa1_setttb, /* setttb */ 579 cpufunc_faultstatus, /* faultstatus */ 580 cpufunc_faultaddress, /* faultaddress */ 581 582 /* TLB functions */ 583 584 armv4_tlb_flushID, /* tlb_flushID */ 585 sa1_tlb_flushID_SE, /* tlb_flushID_SE */ 586 armv4_tlb_flushI, /* tlb_flushI */ 587 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */ 588 armv4_tlb_flushD, /* tlb_flushD */ 589 armv4_tlb_flushD_SE, /* tlb_flushD_SE */ 590 591 /* Cache operations */ 592 593 sa1_cache_syncI, /* icache_sync_all */ 594 sa1_cache_syncI_rng, /* icache_sync_range */ 595 596 sa1_cache_purgeD, /* dcache_wbinv_all */ 597 sa1_cache_purgeD_rng, /* dcache_wbinv_range */ 598 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */ 599 sa1_cache_cleanD_rng, /* dcache_wb_range */ 600 601 
sa1_cache_purgeID, /* idcache_wbinv_all */ 602 sa1_cache_purgeID_rng, /* idcache_wbinv_range */ 603 604 /* Other functions */ 605 606 ixp12x0_drain_readbuf, /* flush_prefetchbuf */ 607 armv4_drain_writebuf, /* drain_writebuf */ 608 cpufunc_nullop, /* flush_brnchtgt_C */ 609 (void *)cpufunc_nullop, /* flush_brnchtgt_E */ 610 611 (void *)cpufunc_nullop, /* sleep */ 612 613 /* Soft functions */ 614 615 cpufunc_null_fixup, /* dataabt_fixup */ 616 cpufunc_null_fixup, /* prefetchabt_fixup */ 617 618 ixp12x0_context_switch, /* context_switch */ 619 620 ixp12x0_setup /* cpu setup */ 621 }; 622 #endif /* CPU_IXP12X0 */ 623 624 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 625 defined(CPU_XSCALE_PXA2X0) 626 struct cpu_functions xscale_cpufuncs = { 627 /* CPU functions */ 628 629 cpufunc_id, /* id */ 630 xscale_cpwait, /* cpwait */ 631 632 /* MMU functions */ 633 634 xscale_control, /* control */ 635 cpufunc_domains, /* domain */ 636 xscale_setttb, /* setttb */ 637 cpufunc_faultstatus, /* faultstatus */ 638 cpufunc_faultaddress, /* faultaddress */ 639 640 /* TLB functions */ 641 642 armv4_tlb_flushID, /* tlb_flushID */ 643 xscale_tlb_flushID_SE, /* tlb_flushID_SE */ 644 armv4_tlb_flushI, /* tlb_flushI */ 645 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */ 646 armv4_tlb_flushD, /* tlb_flushD */ 647 armv4_tlb_flushD_SE, /* tlb_flushD_SE */ 648 649 /* Cache operations */ 650 651 xscale_cache_syncI, /* icache_sync_all */ 652 xscale_cache_syncI_rng, /* icache_sync_range */ 653 654 xscale_cache_purgeD, /* dcache_wbinv_all */ 655 xscale_cache_purgeD_rng, /* dcache_wbinv_range */ 656 xscale_cache_flushD_rng, /* dcache_inv_range */ 657 xscale_cache_cleanD_rng, /* dcache_wb_range */ 658 659 xscale_cache_purgeID, /* idcache_wbinv_all */ 660 xscale_cache_purgeID_rng, /* idcache_wbinv_range */ 661 662 /* Other functions */ 663 664 cpufunc_nullop, /* flush_prefetchbuf */ 665 armv4_drain_writebuf, /* drain_writebuf */ 666 cpufunc_nullop, /* flush_brnchtgt_C */ 667 (void 
*)cpufunc_nullop, /* flush_brnchtgt_E */ 668 669 xscale_cpu_sleep, /* sleep */ 670 671 /* Soft functions */ 672 673 cpufunc_null_fixup, /* dataabt_fixup */ 674 cpufunc_null_fixup, /* prefetchabt_fixup */ 675 676 xscale_context_switch, /* context_switch */ 677 678 xscale_setup /* cpu setup */ 679 }; 680 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 */ 681 682 /* 683 * Global constants also used by locore.s 684 */ 685 686 struct cpu_functions cpufuncs; 687 u_int cputype; 688 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */ 689 690 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \ 691 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 692 defined(CPU_XSCALE_PXA2X0) 693 static void get_cachetype_cp15 __P((void)); 694 695 static void 696 get_cachetype_cp15() 697 { 698 u_int ctype, isize, dsize; 699 u_int multiplier; 700 701 __asm __volatile("mrc p15, 0, %0, c0, c0, 1" 702 : "=r" (ctype)); 703 704 /* 705 * ...and thus spake the ARM ARM: 706 * 707 * If an <opcode2> value corresponding to an unimplemented or 708 * reserved ID register is encountered, the System Control 709 * processor returns the value of the main ID register. 710 */ 711 if (ctype == cpufunc_id()) 712 goto out; 713 714 if ((ctype & CPU_CT_S) == 0) 715 arm_pcache_unified = 1; 716 717 /* 718 * If you want to know how this code works, go read the ARM ARM. 719 */ 720 721 arm_pcache_type = CPU_CT_CTYPE(ctype); 722 723 if (arm_pcache_unified == 0) { 724 isize = CPU_CT_ISIZE(ctype); 725 multiplier = (isize & CPU_CT_xSIZE_M) ? 
3 : 2; 726 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3); 727 if (CPU_CT_xSIZE_ASSOC(isize) == 0) { 728 if (isize & CPU_CT_xSIZE_M) 729 arm_picache_line_size = 0; /* not present */ 730 else 731 arm_picache_ways = 1; 732 } else { 733 arm_picache_ways = multiplier << 734 (CPU_CT_xSIZE_ASSOC(isize) - 1); 735 } 736 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8); 737 } 738 739 dsize = CPU_CT_DSIZE(ctype); 740 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2; 741 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3); 742 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) { 743 if (dsize & CPU_CT_xSIZE_M) 744 arm_pdcache_line_size = 0; /* not present */ 745 else 746 arm_pdcache_ways = 0; 747 } else { 748 arm_pdcache_ways = multiplier << 749 (CPU_CT_xSIZE_ASSOC(dsize) - 1); 750 } 751 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8); 752 753 arm_dcache_align = arm_pdcache_line_size; 754 755 out: 756 arm_dcache_align_mask = arm_dcache_align - 1; 757 } 758 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */ 759 760 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \ 761 defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \ 762 defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0) 763 /* Cache information for CPUs without cache type registers. 
 */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU_ID_* value (masked) */
	int	ct_pcache_type;		/* CPU_CT_CTYPE_* cache policy */
	int	ct_pcache_unified;	/* non-zero if I$ and D$ are one */
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
    /* cpuid,		cache type,	  u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,	0,		  1,	 0,  0,  0,	0,  0,	0 },
    { CPU_ID_ARM250,	0,		  1,	 0,  0,  0,	0,  0,	0 },
    { CPU_ID_ARM3,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,	0,  0,	0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,	0,  0,	0 },
    { CPU_ID_ARM710,	CPU_CT_CTYPE_WT,  1,  8192, 32,  4,	0,  0,	0 },
    { CPU_ID_ARM7500,	CPU_CT_CTYPE_WT,  1,  4096, 16,  4,	0,  0,	0 },
    { CPU_ID_ARM710A,	CPU_CT_CTYPE_WT,  1,  8192, 16,  4,	0,  0,	0 },
    { CPU_ID_ARM7500FE,	CPU_CT_CTYPE_WT,  1,  4096, 16,  4,	0,  0,	0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    /* terminator: ct_cpuid == 0 ends the search */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};

static void get_cachetype_table __P((void));

/*
 * get_cachetype_table:
 *
 *	Fill in the global arm_p[id]cache_* geometry variables from the
 *	cachetab[] entry matching the current CPU ID, for CPUs that lack
 *	a CP15 cache type register.  If no entry matches, the globals
 *	are left unchanged (arm_dcache_align is still recomputed).
 */
static void
get_cachetype_table()
{
	int i;
	u_int32_t cpuid = cpufunc_id();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache_type = cachetab[i].ct_pcache_type;
			arm_pcache_unified = cachetab[i].ct_pcache_unified;
			arm_pdcache_size = cachetab[i].ct_pdcache_size;
			arm_pdcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
			arm_picache_size = cachetab[i].ct_picache_size;
			arm_picache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_picache_ways = cachetab[i].ct_picache_ways;
		}
	}
	arm_dcache_align = arm_pdcache_line_size;

	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */

/*
 * set_cpufuncs:
 *
 *	Identify the CPU via cpufunc_id(), install the matching
 *	cpu_functions table into the global `cpufuncs', probe the cache
 *	geometry, and initialize the pmap PTE handling for that CPU.
 *	Returns 0 on success; panics if the CPU type is not configured
 *	into the kernel.
 *
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7TDMI */
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (cputype == CPU_ID_ARM920T) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		pmap_pte_init_arm9();
		return 0;
	}
#endif /* CPU_ARM9 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1110 */
#ifdef CPU_IXP12X0
	if (cputype == CPU_ID_IXP1200) {
		cpufuncs = ixp12x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80200 */
#ifdef CPU_XSCALE_80321
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0) {
		i80321_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80321 */
#ifdef CPU_XSCALE_PXA2X0
	if (cputype == CPU_ID_PXA250A || cputype == CPU_ID_PXA210A ||
	    cputype == CPU_ID_PXA250B || cputype == CPU_ID_PXA210B) {
		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif /* CPU_XSCALE_PXA2X0 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	/* NOTREACHED -- panic() does not return */
	return(ARCHITECTURE_NOT_PRESENT);
}

/*
 * Fixup routines for data and prefetch aborts.
 *
 * Several compile time symbols are used
 *
 * DEBUG_FAULT_CORRECTION - Print debugging information during the
 * correction of registers after a fault.
 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
 * when defined should use late aborts
 */


/*
 * Null abort fixup routine.
 * For use when no fixup is required.  Always reports success.
 */
int
cpufunc_null_fixup(arg)
	void *arg;
{
	return(ABORT_FIXUP_OK);
}


#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)

/* Debug helpers: compiled away unless DEBUG_FAULT_CORRECTION is set */
#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif

/*
 * "Early" data abort fixup.
 *
 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
 *
 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
 *
 * arg is really a trapframe_t *.  The faulting instruction is decoded
 * and, for base-writeback forms, the base register is stepped back so
 * the instruction can be restarted.  Returns ABORT_FIXUP_OK or
 * ABORT_FIXUP_FAILED.
 */
int
early_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed.  This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		/* Block data transfer (LDM/STM) */
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);

		/* Only correct the base when writeback (W, bit 21) is set */
		if (fault_instruction & (1 << 21)) {
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			/* Writeback to the PC cannot be fixed up */
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			/*
			 * The correction runs opposite to the transfer
			 * direction (bit 23 is the U/up bit), presumably
			 * undoing a writeback the aborted instruction has
			 * already performed -- confirm against the ARM ARM
			 * early-abort model.
			 */
			if (fault_instruction & (1 << 23)) {
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		/* Coprocessor data transfer (LDC/STC) */
		int base;
		int offset;
		int *registers = &frame->tf_r0;

		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			/* No SVC r13 in the trapframe; cannot fix sp */
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			/* 8-bit word offset, scaled to bytes */
			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		/*
		 * NOTE(review): this condition is identical to the branch
		 * above and is therefore unreachable; a different opcode
		 * class was probably intended -- confirm before changing.
		 */
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed.  This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
#endif	/* CPU_ARM2/250/3/6/7 */


#if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
	defined(CPU_ARM7TDMI)
/*
 * "Late" (base updated) data abort fixup
 *
 * For ARM6 (in late-abort mode) and ARM7.
 *
 * In this model, all data-transfer instructions need fixing up.  We defer
 * LDM, STM, LDC and STC fixup to the early-abort handler.
1236 */ 1237 int 1238 late_abort_fixup(arg) 1239 void *arg; 1240 { 1241 trapframe_t *frame = arg; 1242 u_int fault_pc; 1243 u_int fault_instruction; 1244 int saved_lr = 0; 1245 1246 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 1247 1248 /* Ok an abort in SVC mode */ 1249 1250 /* 1251 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 1252 * as the fault happened in svc mode but we need it in the 1253 * usr slot so we can treat the registers as an array of ints 1254 * during fixing. 1255 * NOTE: This PC is in the position but writeback is not 1256 * allowed on r15. 1257 * Doing it like this is more efficient than trapping this 1258 * case in all possible locations in the following fixup code. 1259 */ 1260 1261 saved_lr = frame->tf_usr_lr; 1262 frame->tf_usr_lr = frame->tf_svc_lr; 1263 1264 /* 1265 * Note the trapframe does not have the SVC r13 so a fault 1266 * from an instruction with writeback to r13 in SVC mode is 1267 * not allowed. This should not happen as the kstack is 1268 * always valid. 1269 */ 1270 } 1271 1272 /* Get fault address and status from the CPU */ 1273 1274 fault_pc = frame->tf_pc; 1275 fault_instruction = *((volatile unsigned int *)fault_pc); 1276 1277 /* Decode the fault instruction and fix the registers as needed */ 1278 1279 /* Was is a swap instruction ? 
*/ 1280 1281 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) { 1282 DFC_DISASSEMBLE(fault_pc); 1283 } else if ((fault_instruction & 0x0c000000) == 0x04000000) { 1284 1285 /* Was is a ldr/str instruction */ 1286 /* This is for late abort only */ 1287 1288 int base; 1289 int offset; 1290 int *registers = &frame->tf_r0; 1291 1292 DFC_DISASSEMBLE(fault_pc); 1293 1294 /* This is for late abort only */ 1295 1296 if ((fault_instruction & (1 << 24)) == 0 1297 || (fault_instruction & (1 << 21)) != 0) { 1298 /* postindexed ldr/str with no writeback */ 1299 1300 base = (fault_instruction >> 16) & 0x0f; 1301 if (base == 13 && 1302 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) 1303 return ABORT_FIXUP_FAILED; 1304 if (base == 15) 1305 return ABORT_FIXUP_FAILED; 1306 DFC_PRINTF(("late abt fix: r%d=%08x : ", 1307 base, registers[base])); 1308 if ((fault_instruction & (1 << 25)) == 0) { 1309 /* Immediate offset - easy */ 1310 1311 offset = fault_instruction & 0xfff; 1312 if ((fault_instruction & (1 << 23))) 1313 offset = -offset; 1314 registers[base] += offset; 1315 DFC_PRINTF(("imm=%08x ", offset)); 1316 } else { 1317 /* offset is a shifted register */ 1318 int shift; 1319 1320 offset = fault_instruction & 0x0f; 1321 if (offset == base) 1322 return ABORT_FIXUP_FAILED; 1323 1324 /* 1325 * Register offset - hard we have to 1326 * cope with shifts ! 
1327 */ 1328 offset = registers[offset]; 1329 1330 if ((fault_instruction & (1 << 4)) == 0) 1331 /* shift with amount */ 1332 shift = (fault_instruction >> 7) & 0x1f; 1333 else { 1334 /* shift with register */ 1335 if ((fault_instruction & (1 << 7)) != 0) 1336 /* undefined for now so bail out */ 1337 return ABORT_FIXUP_FAILED; 1338 shift = ((fault_instruction >> 8) & 0xf); 1339 if (base == shift) 1340 return ABORT_FIXUP_FAILED; 1341 DFC_PRINTF(("shift reg=%d ", shift)); 1342 shift = registers[shift]; 1343 } 1344 DFC_PRINTF(("shift=%08x ", shift)); 1345 switch (((fault_instruction >> 5) & 0x3)) { 1346 case 0 : /* Logical left */ 1347 offset = (int)(((u_int)offset) << shift); 1348 break; 1349 case 1 : /* Logical Right */ 1350 if (shift == 0) shift = 32; 1351 offset = (int)(((u_int)offset) >> shift); 1352 break; 1353 case 2 : /* Arithmetic Right */ 1354 if (shift == 0) shift = 32; 1355 offset = (int)(((int)offset) >> shift); 1356 break; 1357 case 3 : /* Rotate right (rol or rxx) */ 1358 return ABORT_FIXUP_FAILED; 1359 break; 1360 } 1361 1362 DFC_PRINTF(("abt: fixed LDR/STR with " 1363 "register offset\n")); 1364 if ((fault_instruction & (1 << 23))) 1365 offset = -offset; 1366 DFC_PRINTF(("offset=%08x ", offset)); 1367 registers[base] += offset; 1368 } 1369 DFC_PRINTF(("r%d=%08x\n", base, registers[base])); 1370 } 1371 } 1372 1373 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 1374 1375 /* Ok an abort in SVC mode */ 1376 1377 /* 1378 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 1379 * as the fault happened in svc mode but we need it in the 1380 * usr slot so we can treat the registers as an array of ints 1381 * during fixing. 1382 * NOTE: This PC is in the position but writeback is not 1383 * allowed on r15. 1384 * Doing it like this is more efficient than trapping this 1385 * case in all possible locations in the prior fixup code. 
1386 */ 1387 1388 frame->tf_svc_lr = frame->tf_usr_lr; 1389 frame->tf_usr_lr = saved_lr; 1390 1391 /* 1392 * Note the trapframe does not have the SVC r13 so a fault 1393 * from an instruction with writeback to r13 in SVC mode is 1394 * not allowed. This should not happen as the kstack is 1395 * always valid. 1396 */ 1397 } 1398 1399 /* 1400 * Now let the early-abort fixup routine have a go, in case it 1401 * was an LDM, STM, LDC or STC that faulted. 1402 */ 1403 1404 return early_abort_fixup(arg); 1405 } 1406 #endif /* CPU_ARM6(LATE)/7/7TDMI */ 1407 1408 /* 1409 * CPU Setup code 1410 */ 1411 1412 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \ 1413 defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \ 1414 defined(CPU_SA1100) || defined(CPU_SA1110) || \ 1415 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 1416 defined(CPU_XSCALE_PXA2X0) 1417 1418 #define IGN 0 1419 #define OR 1 1420 #define BIC 2 1421 1422 struct cpu_option { 1423 char *co_name; 1424 int co_falseop; 1425 int co_trueop; 1426 int co_value; 1427 }; 1428 1429 static u_int parse_cpu_options __P((char *, struct cpu_option *, u_int)); 1430 1431 static u_int 1432 parse_cpu_options(args, optlist, cpuctrl) 1433 char *args; 1434 struct cpu_option *optlist; 1435 u_int cpuctrl; 1436 { 1437 int integer; 1438 1439 while (optlist->co_name) { 1440 if (get_bootconf_option(args, optlist->co_name, 1441 BOOTOPT_TYPE_BOOLEAN, &integer)) { 1442 if (integer) { 1443 if (optlist->co_trueop == OR) 1444 cpuctrl |= optlist->co_value; 1445 else if (optlist->co_trueop == BIC) 1446 cpuctrl &= ~optlist->co_value; 1447 } else { 1448 if (optlist->co_falseop == OR) 1449 cpuctrl |= optlist->co_value; 1450 else if (optlist->co_falseop == BIC) 1451 cpuctrl &= ~optlist->co_value; 1452 } 1453 } 1454 ++optlist; 1455 } 1456 return(cpuctrl); 1457 } 1458 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */ 1459 1460 #if defined (CPU_ARM6) || defined(CPU_ARM7) || 
	defined(CPU_ARM7TDMI) \
 || defined(CPU_ARM8)
/* Boot options shared by the ARM6/ARM7/ARM7TDMI/ARM8 setup routines */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

#endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */

#ifdef CPU_ARM6
struct cpu_option arm6_options[] = {
	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * arm6_setup:
 *
 *	Program the ARM6 control register from built-in defaults
 *	adjusted by boot arguments, after writing back and
 *	invalidating the caches.
 */
void
arm6_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/* Set up default control registers bits */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is computed but never used --
	 * cpu_control() below is called with an all-ones mask.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
		 | CPU_CONTROL_AFLT_ENABLE;

#ifdef ARM6_LATE_ABORT
	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
#endif	/* ARM6_LATE_ABORT */

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_option arm7_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * arm7_setup: as arm6_setup, for the ARM7.
 */
void
arm7_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
	/* NOTE(review): unused -- cpu_control() uses an all-ones mask. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
		 | CPU_CONTROL_AFLT_ENABLE;

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{
 "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * arm7tdmi_setup: as arm6_setup, for the ARM7TDMI.
 */
void
arm7tdmi_setup(args)
	char *args;
{
	int cpuctrl;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * arm8_setup:
 *
 *	Like arm6_setup, but additionally programs the ARM8
 *	clock/test configuration from boot arguments.
 */
void
arm8_setup(args)
	char *args;
{
	int integer;
	int cpuctrl, cpuctrlmask;
	int clocktest;
	int setclock = 0;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
	/* NOTE(review): unused -- cpu_control() uses an all-ones mask. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Get clock configuration */
	clocktest = arm8_clock_config(0, 0) & 0x0f;

	/* Special ARM8 clock and test configuration */
	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		/*
		 * NOTE(review): the option's boolean value is ignored;
		 * its mere presence resets the clock configuration.
		 */
		clocktest = 0;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x01;
		else
			clocktest &= ~(0x01);
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x02;
		else
			clocktest &= ~(0x02);
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
		/*
		 * NOTE(review): the cleared mask (~0xc0, bits 7:6) does
		 * not cover the field written at bits 3:2 -- verify
		 * against the ARM810 clock/test register layout.
		 */
		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
		clocktest |= (integer & 7) << 5;
		setclock = 1;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* Set the clock/test register */
	if (setclock)
		arm8_clock_config(0x7f, clocktest);
}
#endif	/* CPU_ARM8 */

#ifdef CPU_ARM9
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE
 },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * arm9_setup:
 *
 *	Program the ARM9 control register (separate I/D cache enables)
 *	from defaults adjusted by boot arguments, after cleaning the
 *	caches.
 */
void
arm9_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE;
	/* NOTE(review): unused -- cpu_control() uses an all-ones mask. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_CPCLK;

	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

}
#endif	/* CPU_ARM9 */

#ifdef CPU_SA110
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * sa110_setup:
 *
 *	Program the SA-110 control register from defaults adjusted by
 *	boot arguments, then enable clock switching via CP15.
 */
void
sa110_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE;
	/* NOTE(review): unused -- cpu_control() uses an all-ones mask. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	/* cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/*
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm ("mcr 15, 0, r0, c15, c1, 2");
}
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,
CPU_CONTROL_WBUF_ENABLE }, 1804 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 1805 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 1806 { NULL, IGN, IGN, 0 } 1807 }; 1808 1809 void 1810 sa11x0_setup(args) 1811 char *args; 1812 { 1813 int cpuctrl, cpuctrlmask; 1814 1815 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 1816 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 1817 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 1818 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE; 1819 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 1820 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 1821 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 1822 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 1823 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 1824 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 1825 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 1826 1827 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl); 1828 1829 #ifdef __ARMEB__ 1830 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 1831 #endif 1832 1833 /* Clear out the cache */ 1834 cpu_idcache_wbinv_all(); 1835 1836 /* Set the control register */ 1837 cpu_control(0xffffffff, cpuctrl); 1838 } 1839 #endif /* CPU_SA1100 || CPU_SA1110 */ 1840 1841 #if defined(CPU_IXP12X0) 1842 struct cpu_option ixp12x0_options[] = { 1843 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 1844 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 1845 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 1846 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 1847 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 1848 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 1849 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 1850 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 1851 { NULL, IGN, IGN, 0 } 1852 }; 1853 1854 void 1855 ixp12x0_setup(args) 1856 char *args; 1857 { 1858 int 
	    cpuctrl, cpuctrlmask;


	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE;

	/* Unlike most setup routines here, this mask IS passed below. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
	    | CPU_CONTROL_VECRELOC;

	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register (only the bits in cpuctrlmask change) */
	curcpu()->ci_ctrl = cpuctrl;
	/* cpu_control(0xffffffff, cpuctrl); */
	cpu_control(cpuctrlmask, cpuctrl);
}
#endif /* CPU_IXP12X0 */

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(CPU_XSCALE_PXA2X0)
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * xscale_setup:
 *
 *	Program the XScale control register from defaults adjusted by
 *	boot arguments, then configure write-coalescing in the CP15
 *	auxiliary control register.
 */
void
xscale_setup(args)
	char *args;
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	/* NOTE(review): unused -- cpu_control() uses an all-ones mask. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	curcpu()->ci_ctrl = cpuctrl;
	/* cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
	/*
	 * NOTE(review): plain #if on an option macro -- this relies on
	 * the kernel option being defined to a non-zero value; confirm
	 * #ifdef is not what was intended.
	 */
#if XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 */