/*	cpufunc.h,v 1.40.22.4 2007/11/08 10:59:33 matt Exp	*/

/*
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 */

#ifndef _ARM32_CPUFUNC_H_
#define _ARM32_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>
#include <arm/armreg.h>
#include <arm/cpuconf.h>

struct cpu_functions {

	/* CPU functions */

	u_int	(*cf_id)		(void);
	void	(*cf_cpwait)		(void);

	/* MMU functions */

	u_int	(*cf_control)		(u_int, u_int);
	void	(*cf_domains)		(u_int);
	void	(*cf_setttb)		(u_int, bool);
	u_int	(*cf_faultstatus)	(void);
	u_int	(*cf_faultaddress)	(void);

	/* TLB functions */

	void	(*cf_tlb_flushID)	(void);
	void	(*cf_tlb_flushID_SE)	(u_int);
	void	(*cf_tlb_flushI)	(void);
	void	(*cf_tlb_flushI_SE)	(u_int);
	void	(*cf_tlb_flushD)	(void);
	void	(*cf_tlb_flushD_SE)	(u_int);

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_all		Synchronize I-cache
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	I-cache Synch (all or range):
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write-back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 *
	 * (An illustrative usage sketch follows the cpu_* wrapper macros
	 * below.)
	 */
	void	(*cf_icache_sync_all)	(void);
	void	(*cf_icache_sync_range)	(vaddr_t, vsize_t);

	void	(*cf_dcache_wbinv_all)	(void);
	void	(*cf_dcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*cf_dcache_inv_range)	(vaddr_t, vsize_t);
	void	(*cf_dcache_wb_range)	(vaddr_t, vsize_t);

	void	(*cf_sdcache_wbinv_range)(vaddr_t, paddr_t, psize_t);
	void	(*cf_sdcache_inv_range)	(vaddr_t, paddr_t, psize_t);
	void	(*cf_sdcache_wb_range)	(vaddr_t, paddr_t, psize_t);

	void	(*cf_idcache_wbinv_all)	(void);
	void	(*cf_idcache_wbinv_range)(vaddr_t, vsize_t);

	/* Other functions */

	void	(*cf_flush_prefetchbuf)	(void);
	void	(*cf_drain_writebuf)	(void);
	void	(*cf_flush_brnchtgt_C)	(void);
	void	(*cf_flush_brnchtgt_E)	(u_int);

	void	(*cf_sleep)		(int mode);

	/* Soft functions */

	int	(*cf_dataabt_fixup)	(void *);
	int	(*cf_prefetchabt_fixup)	(void *);

	void	(*cf_context_switch)	(u_int);

	void	(*cf_setup)		(char *);
};

extern struct cpu_functions cpufuncs;
extern u_int cputype;

#define	cpu_id()		cpufuncs.cf_id()

#define	cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define	cpu_domains(d)		cpufuncs.cf_domains(d)
#define	cpu_setttb(t, f)	cpufuncs.cf_setttb(t, f)
#define	cpu_faultstatus()	cpufuncs.cf_faultstatus()
#define	cpu_faultaddress()	cpufuncs.cf_faultaddress()

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
#define	cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_sdcache_wbinv_range(a, b, s) cpufuncs.cf_sdcache_wbinv_range((a), (b), (s))
#define	cpu_sdcache_inv_range(a, b, s) cpufuncs.cf_sdcache_inv_range((a), (b), (s))
#define	cpu_sdcache_wb_range(a, b, s) cpufuncs.cf_sdcache_wb_range((a), (b), (s))

#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
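
/*
 * Illustrative sketch (not part of the original API): a common use of the
 * D-cache primitives above is keeping memory coherent around DMA.  The
 * names `va' and `len' are hypothetical.
 *
 *	cpu_dcache_wb_range(va, len);	before a device reads the buffer
 *	cpu_dcache_inv_range(va, len);	before the CPU reads data a device
 *					has written
 *
 * Note the rule above: dcache_inv_range may be implemented as WB-Inv, so
 * code must not assume the invalidated lines were never written back.
 */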

#define	cpu_flush_prefetchbuf()	cpufuncs.cf_flush_prefetchbuf()
#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#define	cpu_flush_brnchtgt_C()	cpufuncs.cf_flush_brnchtgt_C()
#define	cpu_flush_brnchtgt_E(e)	cpufuncs.cf_flush_brnchtgt_E(e)

#define	cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define	cpu_dataabt_fixup(a)		cpufuncs.cf_dataabt_fixup(a)
#define	cpu_prefetchabt_fixup(a)	cpufuncs.cf_prefetchabt_fixup(a)
#define	ABORT_FIXUP_OK		0	/* fixup succeeded */
#define	ABORT_FIXUP_FAILED	1	/* fixup failed */
#define	ABORT_FIXUP_RETURN	2	/* abort handler should return */

#define	cpu_context_switch(a)	cpufuncs.cf_context_switch(a)
#define	cpu_setup(a)		cpufuncs.cf_setup(a)

int	set_cpufuncs		(void);
int	set_cpufuncs_id		(u_int);
#define	ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define	ARCHITECTURE_NOT_SUPPORTED	2	/* not known */
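
/*
 * Illustrative sketch: set_cpufuncs() fills in `cpufuncs' for the CPU it
 * detects and returns 0 on success (otherwise one of the ARCHITECTURE_*
 * codes above), so platform start-up code is expected to call it once,
 * early, and treat failure as fatal:
 *
 *	if (set_cpufuncs())
 *		panic("cpu not recognized");
 *
 * (The panic message is hypothetical; exact boot-time handling is up to
 * the port.)
 */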

void	cpufunc_nullop		(void);
int	cpufunc_null_fixup	(void *);
int	early_abort_fixup	(void *);
int	late_abort_fixup	(void *);
u_int	cpufunc_id		(void);
u_int	cpufunc_control		(u_int, u_int);
void	cpufunc_domains		(u_int);
u_int	cpufunc_faultstatus	(void);
u_int	cpufunc_faultaddress	(void);

#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3)
void	arm3_cache_flush	(void);
#endif	/* CPU_ARM2 || CPU_ARM250 || CPU_ARM3 */

#ifdef CPU_ARM2
u_int	arm2_id			(void);
#endif /* CPU_ARM2 */

#ifdef CPU_ARM250
u_int	arm250_id		(void);
#endif

#ifdef CPU_ARM3
u_int	arm3_control		(u_int, u_int);
#endif	/* CPU_ARM3 */

#if defined(CPU_ARM6) || defined(CPU_ARM7)
void	arm67_setttb		(u_int, bool);
void	arm67_tlb_flush		(void);
void	arm67_tlb_purge		(u_int);
void	arm67_cache_flush	(void);
void	arm67_context_switch	(u_int);
#endif	/* CPU_ARM6 || CPU_ARM7 */

#ifdef CPU_ARM6
void	arm6_setup		(char *);
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
void	arm7_setup		(char *);
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
int	arm7_dataabt_fixup	(void *);
void	arm7tdmi_setup		(char *);
void	arm7tdmi_setttb		(u_int, bool);
void	arm7tdmi_tlb_flushID	(void);
void	arm7tdmi_tlb_flushID_SE	(u_int);
void	arm7tdmi_cache_flushID	(void);
void	arm7tdmi_context_switch	(u_int);
#endif /* CPU_ARM7TDMI */

#ifdef CPU_ARM8
void	arm8_setttb		(u_int, bool);
void	arm8_tlb_flushID	(void);
void	arm8_tlb_flushID_SE	(u_int);
void	arm8_cache_flushID	(void);
void	arm8_cache_flushID_E	(u_int);
void	arm8_cache_cleanID	(void);
void	arm8_cache_cleanID_E	(u_int);
void	arm8_cache_purgeID	(void);
void	arm8_cache_purgeID_E	(u_int entry);

void	arm8_cache_syncI	(void);
void	arm8_cache_cleanID_rng	(vaddr_t, vsize_t);
void	arm8_cache_cleanD_rng	(vaddr_t, vsize_t);
void	arm8_cache_purgeID_rng	(vaddr_t, vsize_t);
void	arm8_cache_purgeD_rng	(vaddr_t, vsize_t);
void	arm8_cache_syncI_rng	(vaddr_t, vsize_t);

void	arm8_context_switch	(u_int);

void	arm8_setup		(char *);

u_int	arm8_clock_config	(u_int, u_int);
#endif

#ifdef CPU_FA526
void	fa526_setup		(char *);
void	fa526_setttb		(u_int, bool);
void	fa526_context_switch	(u_int);
void	fa526_cpu_sleep		(int);
void	fa526_tlb_flushI_SE	(u_int);
void	fa526_tlb_flushID_SE	(u_int);
void	fa526_flush_prefetchbuf	(void);
void	fa526_flush_brnchtgt_E	(u_int);

void	fa526_icache_sync_all	(void);
void	fa526_icache_sync_range(vaddr_t, vsize_t);
void	fa526_dcache_wbinv_all	(void);
void	fa526_dcache_wbinv_range(vaddr_t, vsize_t);
void	fa526_dcache_inv_range	(vaddr_t, vsize_t);
void	fa526_dcache_wb_range	(vaddr_t, vsize_t);
void	fa526_idcache_wbinv_all(void);
void	fa526_idcache_wbinv_range(vaddr_t, vsize_t);
#endif

#ifdef CPU_SA110
void	sa110_setup		(char *);
void	sa110_context_switch	(u_int);
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
void	sa11x0_drain_readbuf	(void);

void	sa11x0_context_switch	(u_int);
void	sa11x0_cpu_sleep	(int);

void	sa11x0_setup		(char *);
#endif

#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
void	sa1_setttb		(u_int, bool);

void	sa1_tlb_flushID_SE	(u_int);

void	sa1_cache_flushID	(void);
void	sa1_cache_flushI	(void);
void	sa1_cache_flushD	(void);
void	sa1_cache_flushD_SE	(u_int);

void	sa1_cache_cleanID	(void);
void	sa1_cache_cleanD	(void);
void	sa1_cache_cleanD_E	(u_int);

void	sa1_cache_purgeID	(void);
void	sa1_cache_purgeID_E	(u_int);
void	sa1_cache_purgeD	(void);
void	sa1_cache_purgeD_E	(u_int);

void	sa1_cache_syncI		(void);
void	sa1_cache_cleanID_rng	(vaddr_t, vsize_t);
void	sa1_cache_cleanD_rng	(vaddr_t, vsize_t);
void	sa1_cache_purgeID_rng	(vaddr_t, vsize_t);
void	sa1_cache_purgeD_rng	(vaddr_t, vsize_t);
void	sa1_cache_syncI_rng	(vaddr_t, vsize_t);
#endif

#ifdef CPU_ARM9
void	arm9_setttb		(u_int, bool);

void	arm9_tlb_flushID_SE	(u_int);

void	arm9_icache_sync_all	(void);
void	arm9_icache_sync_range	(vaddr_t, vsize_t);

void	arm9_dcache_wbinv_all	(void);
void	arm9_dcache_wbinv_range	(vaddr_t, vsize_t);
void	arm9_dcache_inv_range	(vaddr_t, vsize_t);
void	arm9_dcache_wb_range	(vaddr_t, vsize_t);

void	arm9_idcache_wbinv_all	(void);
void	arm9_idcache_wbinv_range (vaddr_t, vsize_t);

void	arm9_context_switch	(u_int);

void	arm9_setup		(char *);

extern unsigned arm9_dcache_sets_max;
extern unsigned arm9_dcache_sets_inc;
extern unsigned arm9_dcache_index_max;
extern unsigned arm9_dcache_index_inc;
#endif

#if defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_SHEEVA)
void	arm10_tlb_flushID_SE	(u_int);
void	arm10_tlb_flushI_SE	(u_int);

void	arm10_context_switch	(u_int);

void	arm10_setup		(char *);
#endif

#if defined(CPU_ARM9E) || defined (CPU_ARM10) || defined(CPU_SHEEVA)
void	armv5_ec_setttb			(u_int, bool);

void	armv5_ec_icache_sync_all	(void);
void	armv5_ec_icache_sync_range	(vaddr_t, vsize_t);

void	armv5_ec_dcache_wbinv_all	(void);
void	armv5_ec_dcache_wbinv_range	(vaddr_t, vsize_t);
void	armv5_ec_dcache_inv_range	(vaddr_t, vsize_t);
void	armv5_ec_dcache_wb_range	(vaddr_t, vsize_t);

void	armv5_ec_idcache_wbinv_all	(void);
void	armv5_ec_idcache_wbinv_range	(vaddr_t, vsize_t);
#endif

#if defined (CPU_ARM10) || defined (CPU_ARM11MPCORE)
void	armv5_setttb		(u_int, bool);

void	armv5_icache_sync_all	(void);
void	armv5_icache_sync_range	(vaddr_t, vsize_t);

void	armv5_dcache_wbinv_all	(void);
void	armv5_dcache_wbinv_range (vaddr_t, vsize_t);
void	armv5_dcache_inv_range	(vaddr_t, vsize_t);
void	armv5_dcache_wb_range	(vaddr_t, vsize_t);

void	armv5_idcache_wbinv_all	(void);
void	armv5_idcache_wbinv_range (vaddr_t, vsize_t);

extern unsigned armv5_dcache_sets_max;
extern unsigned armv5_dcache_sets_inc;
extern unsigned armv5_dcache_index_max;
extern unsigned armv5_dcache_index_inc;
#endif

#if defined(CPU_ARM11MPCORE)
void	arm11mpcore_setup	(char *);
#endif

#if defined(CPU_ARM11) || defined(CPU_CORTEX)
void	arm11_setttb		(u_int, bool);

void	arm11_tlb_flushID_SE	(u_int);
void	arm11_tlb_flushI_SE	(u_int);

void	arm11_context_switch	(u_int);

void	arm11_cpu_sleep		(int);
void	arm11_setup		(char *string);
void	arm11_tlb_flushID	(void);
void	arm11_tlb_flushI	(void);
void	arm11_tlb_flushD	(void);
void	arm11_tlb_flushD_SE	(u_int va);

void	armv11_dcache_wbinv_all	(void);
void	armv11_idcache_wbinv_all(void);

void	arm11_drain_writebuf	(void);
void	arm11_sleep		(int);

void	armv6_setttb		(u_int, bool);

void	armv6_icache_sync_all	(void);
void	armv6_icache_sync_range	(vaddr_t, vsize_t);

void	armv6_dcache_wbinv_all	(void);
void	armv6_dcache_wbinv_range (vaddr_t, vsize_t);
void	armv6_dcache_inv_range	(vaddr_t, vsize_t);
void	armv6_dcache_wb_range	(vaddr_t, vsize_t);

void	armv6_idcache_wbinv_all	(void);
void	armv6_idcache_wbinv_range (vaddr_t, vsize_t);
#endif

#if defined(CPU_CORTEX)
void	armv7_setttb(u_int, bool);

void	armv7_icache_sync_range(vaddr_t, vsize_t);
void	armv7_dcache_wb_range(vaddr_t, vsize_t);
void	armv7_dcache_wbinv_range(vaddr_t, vsize_t);
void	armv7_dcache_inv_range(vaddr_t, vsize_t);
void	armv7_idcache_wbinv_range(vaddr_t, vsize_t);

void	armv7_icache_sync_all(void);
void	armv7_cpu_sleep(int);
void	armv7_context_switch(u_int);
void	armv7_tlb_flushID_SE(u_int);
void	armv7_drain_writebuf(void);
void	armv7_setup(char *string);
#endif

#if defined(CPU_CORTEX) || defined(CPU_PJ4B)
void	armv7_dcache_wbinv_all (void);
void	armv7_idcache_wbinv_all(void);
#endif

#if defined(CPU_PJ4B)
void	pj4b_setttb(u_int, bool);
void	pj4b_tlb_flushID(void);
void	pj4b_tlb_flushID_SE(u_int);

void	pj4b_icache_sync_range(vaddr_t, vsize_t);
void	pj4b_idcache_wbinv_range(vaddr_t, vsize_t);
void	pj4b_dcache_wbinv_range(vaddr_t, vsize_t);
void	pj4b_dcache_inv_range(vaddr_t, vsize_t);
void	pj4b_dcache_wb_range(vaddr_t, vsize_t);

void	pj4b_drain_writebuf(void);
void	pj4b_drain_readbuf(void);
void	pj4b_flush_brnchtgt_all(void);
void	pj4b_flush_brnchtgt_va(u_int);
void	pj4b_context_switch(u_int);
void	pj4b_sleep(int);

void	pj4bv7_setup(char *string);
void	pj4b_config(void);
#endif /* CPU_PJ4B */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
void	arm11x6_setttb			(u_int, bool);
void	arm11x6_idcache_wbinv_all	(void);
void	arm11x6_dcache_wbinv_all	(void);
void	arm11x6_icache_sync_all		(void);
void	arm11x6_flush_prefetchbuf	(void);
void	arm11x6_icache_sync_range	(vaddr_t, vsize_t);
void	arm11x6_idcache_wbinv_range	(vaddr_t, vsize_t);
void	arm11x6_setup			(char *string);
void	arm11x6_sleep			(int);	/* no ref. for errata */
#endif

#if defined(CPU_ARM1136)
void	arm1136_sleep_rev0	(int);	/* for errata 336501 */
#endif

#if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM10) || \
    defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
    defined(CPU_FA526) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_CORTEX) || defined(CPU_SHEEVA)

void	armv4_tlb_flushID	(void);
void	armv4_tlb_flushI	(void);
void	armv4_tlb_flushD	(void);
void	armv4_tlb_flushD_SE	(u_int);

void	armv4_drain_writebuf	(void);
#endif

#if defined(CPU_IXP12X0)
void	ixp12x0_drain_readbuf	(void);
void	ixp12x0_context_switch	(u_int);
void	ixp12x0_setup		(char *);
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_CORTEX)

void	xscale_cpwait		(void);
#define	cpu_cpwait()		cpufuncs.cf_cpwait()

void	xscale_cpu_sleep	(int);

u_int	xscale_control		(u_int, u_int);

void	xscale_setttb		(u_int, bool);

void	xscale_tlb_flushID_SE	(u_int);

void	xscale_cache_flushID	(void);
void	xscale_cache_flushI	(void);
void	xscale_cache_flushD	(void);
void	xscale_cache_flushD_SE	(u_int);

void	xscale_cache_cleanID	(void);
void	xscale_cache_cleanD	(void);
void	xscale_cache_cleanD_E	(u_int);

void	xscale_cache_clean_minidata (void);

void	xscale_cache_purgeID	(void);
void	xscale_cache_purgeID_E	(u_int);
void	xscale_cache_purgeD	(void);
void	xscale_cache_purgeD_E	(u_int);

void	xscale_cache_syncI	(void);
void	xscale_cache_cleanID_rng (vaddr_t, vsize_t);
void	xscale_cache_cleanD_rng	(vaddr_t, vsize_t);
void	xscale_cache_purgeID_rng (vaddr_t, vsize_t);
void	xscale_cache_purgeD_rng	(vaddr_t, vsize_t);
void	xscale_cache_syncI_rng	(vaddr_t, vsize_t);
void	xscale_cache_flushD_rng	(vaddr_t, vsize_t);

void	xscale_context_switch	(u_int);

void	xscale_setup		(char *);
#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 || CPU_CORTEX */

#if defined(CPU_SHEEVA)
void	sheeva_dcache_wbinv_range (vaddr_t, vsize_t);
void	sheeva_dcache_inv_range	(vaddr_t, vsize_t);
void	sheeva_dcache_wb_range	(vaddr_t, vsize_t);
void	sheeva_idcache_wbinv_range (vaddr_t, vsize_t);
void	sheeva_setup(char *);
void	sheeva_cpu_sleep(int);
#endif

#define tlb_flush	cpu_tlb_flushID
#define setttb		cpu_setttb
#define drain_writebuf	cpu_drain_writebuf

#ifndef cpu_cpwait
#define	cpu_cpwait()
#endif

/*
 * Macros for manipulating CPU interrupts
 */
#ifdef __PROG32
static __inline uint32_t __set_cpsr_c(uint32_t bic, uint32_t eor) __attribute__((__unused__));
static __inline uint32_t disable_interrupts(uint32_t mask) __attribute__((__unused__));
static __inline uint32_t enable_interrupts(uint32_t mask) __attribute__((__unused__));

static __inline uint32_t
__set_cpsr_c(uint32_t bic, uint32_t eor)
{
	uint32_t	tmp, ret;

	__asm volatile(
		"mrs	%0, cpsr\n"	/* Get the CPSR */
		"bic	%1, %0, %2\n"	/* Clear bits */
		"eor	%1, %1, %3\n"	/* XOR bits */
		"msr	cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor) : "memory");

	return ret;
}

static __inline uint32_t
disable_interrupts(uint32_t mask)
{
	uint32_t	tmp, ret;
	mask &= (I32_bit | F32_bit);

	__asm volatile(
		"mrs	%0, cpsr\n"	/* Get the CPSR */
		"orr	%1, %0, %2\n"	/* set bits */
		"msr	cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (mask)
	: "memory");

	return ret;
}

static __inline uint32_t
enable_interrupts(uint32_t mask)
{
	uint32_t	ret, tmp;
	mask &= (I32_bit | F32_bit);

	__asm volatile(
		"mrs	%0, cpsr\n"	/* Get the CPSR */
		"bic	%1, %0, %2\n"	/* Clear bits */
		"msr	cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (mask)
	: "memory");

	return ret;
}

#define restore_interrupts(old_cpsr)					\
	(__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
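
/*
 * Illustrative sketch: disable_interrupts() returns the previous CPSR, so
 * the usual pattern brackets a critical section with a save/restore pair
 * (the variable name `savedints' is hypothetical):
 *
 *	uint32_t savedints = disable_interrupts(I32_bit | F32_bit);
 *	... code that must not be interrupted ...
 *	restore_interrupts(savedints);
 *
 * restore_interrupts() reinstates exactly the saved I and F disable bits,
 * leaving the rest of the CPSR alone.
 */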

static inline void cpsie(register_t psw) __attribute__((__unused__));
static inline register_t cpsid(register_t psw) __attribute__((__unused__));

static inline void
cpsie(register_t psw)
{
#ifdef _ARM_ARCH_6
	if (!__builtin_constant_p(psw)) {
		enable_interrupts(psw);
		return;
	}
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsie\ti"); break;
	case F32_bit:		__asm("cpsie\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsie\tif"); break;
	}
#else
	enable_interrupts(psw);
#endif
}

static inline register_t
cpsid(register_t psw)
{
#ifdef _ARM_ARCH_6
	register_t oldpsw;
	if (!__builtin_constant_p(psw))
		return disable_interrupts(psw);

	__asm("mrs	%0, cpsr" : "=r"(oldpsw));
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsid\ti"); break;
	case F32_bit:		__asm("cpsid\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsid\tif"); break;
	}
	return oldpsw;
#else
	return disable_interrupts(psw);
#endif
}

#else /* ! __PROG32 */
#define	disable_interrupts(mask)					\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE),		\
		 (mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))

#define	enable_interrupts(mask)						\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE), 0))

#define	restore_interrupts(old_r15)					\
	(set_r15((R15_IRQ_DISABLE | R15_FIQ_DISABLE),			\
		 (old_r15) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))
#endif /* __PROG32 */

#ifdef __PROG32
/* Functions to manipulate the CPSR. */
u_int	SetCPSR(u_int, u_int);
u_int	GetCPSR(void);
#else
/* Functions to manipulate the processor control bits in r15. */
u_int	set_r15(u_int, u_int);
u_int	get_r15(void);
#endif /* __PROG32 */


/*
 * CPU functions from locore.S
 */

void	cpu_reset	(void) __dead;

/*
 * Cache info variables.
 */

/* PRIMARY CACHE VARIABLES */
struct arm_cache_info {
	u_int icache_size;
	u_int icache_line_size;
	u_int icache_ways;
	u_int icache_sets;

	u_int dcache_size;
	u_int dcache_line_size;
	u_int dcache_ways;
	u_int dcache_sets;

	u_int cache_type;
	bool cache_unified;
};

extern u_int arm_cache_prefer_mask;
extern u_int arm_dcache_align;
extern u_int arm_dcache_align_mask;
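
/*
 * Illustrative sketch (assuming, as is conventional, that arm_dcache_align
 * is the D-cache line size in bytes and arm_dcache_align_mask is
 * arm_dcache_align - 1): callers that need cache-line granularity can
 * round a range outward before a cache operation.  The names `va' and
 * `len' are hypothetical.
 *
 *	vaddr_t sva = va & ~arm_dcache_align_mask;
 *	vsize_t slen = ((va + len + arm_dcache_align_mask)
 *	    & ~arm_dcache_align_mask) - sva;
 *	cpu_dcache_wbinv_range(sva, slen);
 */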

extern struct arm_cache_info arm_pcache;
extern struct arm_cache_info arm_scache;
#endif	/* _KERNEL */

#if defined(_KERNEL) || defined(_KMEMUSER)
/*
 * Miscellany
 */

int	get_pc_str_offset	(void);

/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */

void	set_stackptr	(u_int, u_int);
u_int	get_stackptr	(u_int);

#endif /* _KERNEL || _KMEMUSER */

#endif	/* _ARM32_CPUFUNC_H_ */

/* End of cpufunc.h */