/*	$NetBSD: cpufunc.h,v 1.23 2002/05/03 16:45:22 rjs Exp $	*/

/*
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 */

#ifndef _ARM32_CPUFUNC_H_
#define _ARM32_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>
#include <arm/cpuconf.h>

struct cpu_functions {

	/* CPU functions */

	u_int	(*cf_id)		__P((void));
	void	(*cf_cpwait)		__P((void));

	/* MMU functions */

	u_int	(*cf_control)		__P((u_int bic, u_int eor));
	void	(*cf_domains)		__P((u_int domains));
	void	(*cf_setttb)		__P((u_int ttb));
	u_int	(*cf_faultstatus)	__P((void));
	u_int	(*cf_faultaddress)	__P((void));

	/* TLB functions */

	void	(*cf_tlb_flushID)	__P((void));
	void	(*cf_tlb_flushID_SE)	__P((u_int va));
	void	(*cf_tlb_flushI)	__P((void));
	void	(*cf_tlb_flushI_SE)	__P((u_int va));
	void	(*cf_tlb_flushD)	__P((void));
	void	(*cf_tlb_flushD_SE)	__P((u_int va));

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_all		Synchronize I-cache
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	I-cache Synch (all or range):
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write-back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 */
	void	(*cf_icache_sync_all)	__P((void));
	void	(*cf_icache_sync_range)	__P((vaddr_t, vsize_t));

	void	(*cf_dcache_wbinv_all)	__P((void));
	void	(*cf_dcache_wbinv_range) __P((vaddr_t, vsize_t));
	void	(*cf_dcache_inv_range)	__P((vaddr_t, vsize_t));
	void	(*cf_dcache_wb_range)	__P((vaddr_t, vsize_t));

	void	(*cf_idcache_wbinv_all)	__P((void));
	void	(*cf_idcache_wbinv_range) __P((vaddr_t, vsize_t));

	/* Other functions */

	void	(*cf_flush_prefetchbuf)	__P((void));
	void	(*cf_drain_writebuf)	__P((void));
	void	(*cf_flush_brnchtgt_C)	__P((void));
	void	(*cf_flush_brnchtgt_E)	__P((u_int va));

	void	(*cf_sleep)		__P((int mode));

	/* Soft functions */

	int	(*cf_dataabt_fixup)	__P((void *arg));
	int	(*cf_prefetchabt_fixup)	__P((void *arg));

	void	(*cf_context_switch)	__P((void));

	void	(*cf_setup)		__P((char *string));
};

extern struct cpu_functions cpufuncs;
extern u_int cputype;

#define	cpu_id()		cpufuncs.cf_id()
#define	cpu_cpwait()		cpufuncs.cf_cpwait()

#define	cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define	cpu_domains(d)		cpufuncs.cf_domains(d)
#define	cpu_setttb(t)		cpufuncs.cf_setttb(t)
#define	cpu_faultstatus()	cpufuncs.cf_faultstatus()
#define	cpu_faultaddress()	cpufuncs.cf_faultaddress()

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
#define	cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
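
/*
 * Illustrative example (not part of the original header): the usual
 * sequence for making newly written instructions visible, e.g. after
 * copying code into a page, is to write the data back from the D-cache
 * and then synchronize the I-cache over the same range.  "va" and "len"
 * are placeholders for the valid virtual address and size of the
 * modified region:
 *
 *	cpu_dcache_wb_range(va, len);
 *	cpu_icache_sync_range(va, len);
 *	cpu_drain_writebuf();
 *
 * On cores whose back-ends cannot operate on just a range, these
 * primitives may fall back to whole-cache operations, as permitted by
 * the rules documented above.
 */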
#define	cpu_flush_prefetchbuf()	cpufuncs.cf_flush_prefetchbuf()
#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#define	cpu_flush_brnchtgt_C()	cpufuncs.cf_flush_brnchtgt_C()
#define	cpu_flush_brnchtgt_E(e)	cpufuncs.cf_flush_brnchtgt_E(e)

#define	cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define	cpu_dataabt_fixup(a)		cpufuncs.cf_dataabt_fixup(a)
#define	cpu_prefetchabt_fixup(a)	cpufuncs.cf_prefetchabt_fixup(a)
#define	ABORT_FIXUP_OK		0	/* fixup succeeded */
#define	ABORT_FIXUP_FAILED	1	/* fixup failed */
#define	ABORT_FIXUP_RETURN	2	/* abort handler should return */

#define	cpu_setup(a)		cpufuncs.cf_setup(a)

int	set_cpufuncs		__P((void));
#define	ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define	ARCHITECTURE_NOT_SUPPORTED	2	/* not known */

void	cpufunc_nullop		__P((void));
int	cpufunc_null_fixup	__P((void *));
int	early_abort_fixup	__P((void *));
int	late_abort_fixup	__P((void *));
u_int	cpufunc_id		__P((void));
u_int	cpufunc_control		__P((u_int clear, u_int bic));
void	cpufunc_domains		__P((u_int domains));
u_int	cpufunc_faultstatus	__P((void));
u_int	cpufunc_faultaddress	__P((void));
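
/*
 * Illustrative sketch (not part of the original header): during early
 * machine-dependent bootstrap a port is expected to install the per-CPU
 * function table before any of the cpu_*() macros above are used,
 * roughly as follows.  "boot_args" is a placeholder for the option
 * string the port hands to the setup routine, and treating a non-zero
 * return (one of the ARCHITECTURE_* values above) as fatal is an
 * assumption:
 *
 *	if (set_cpufuncs() != 0)
 *		panic("cpu not recognised");
 *	cpu_setup(boot_args);
 *	cpu_idcache_wbinv_all();
 *
 * Once set_cpufuncs() has filled in "cpufuncs" for the CPU identified
 * by cpufunc_id(), the cpu_*() macros dispatch to the matching routines
 * declared below.
 */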
#ifdef CPU_ARM3
u_int	arm3_control		__P((u_int clear, u_int bic));
void	arm3_cache_flush	__P((void));
#endif	/* CPU_ARM3 */

#if defined(CPU_ARM6) || defined(CPU_ARM7)
void	arm67_setttb		__P((u_int ttb));
void	arm67_tlb_flush		__P((void));
void	arm67_tlb_purge		__P((u_int va));
void	arm67_cache_flush	__P((void));
void	arm67_context_switch	__P((void));
#endif	/* CPU_ARM6 || CPU_ARM7 */

#ifdef CPU_ARM6
void	arm6_setup		__P((char *string));
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
void	arm7_setup		__P((char *string));
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
int	arm7_dataabt_fixup	__P((void *arg));
void	arm7tdmi_setup		__P((char *string));
void	arm7tdmi_setttb		__P((u_int ttb));
void	arm7tdmi_tlb_flushID	__P((void));
void	arm7tdmi_tlb_flushID_SE	__P((u_int va));
void	arm7tdmi_cache_flushID	__P((void));
void	arm7tdmi_context_switch	__P((void));
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
void	arm8_setttb		__P((u_int ttb));
void	arm8_tlb_flushID	__P((void));
void	arm8_tlb_flushID_SE	__P((u_int va));
void	arm8_cache_flushID	__P((void));
void	arm8_cache_flushID_E	__P((u_int entry));
void	arm8_cache_cleanID	__P((void));
void	arm8_cache_cleanID_E	__P((u_int entry));
void	arm8_cache_purgeID	__P((void));
void	arm8_cache_purgeID_E	__P((u_int entry));

void	arm8_cache_syncI	__P((void));
void	arm8_cache_cleanID_rng	__P((vaddr_t start, vsize_t end));
void	arm8_cache_cleanD_rng	__P((vaddr_t start, vsize_t end));
void	arm8_cache_purgeID_rng	__P((vaddr_t start, vsize_t end));
void	arm8_cache_purgeD_rng	__P((vaddr_t start, vsize_t end));
void	arm8_cache_syncI_rng	__P((vaddr_t start, vsize_t end));

void	arm8_context_switch	__P((void));

void	arm8_setup		__P((char *string));

u_int	arm8_clock_config	__P((u_int, u_int));
#endif

#ifdef CPU_SA110
void	sa110_setup		__P((char *string));
void	sa110_context_switch	__P((void));
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
void	sa11x0_drain_readbuf	__P((void));

void	sa11x0_context_switch	__P((void));
void	sa11x0_cpu_sleep	__P((int mode));

void	sa11x0_setup		__P((char *string));
#endif

#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
void	sa1_setttb		__P((u_int ttb));

void	sa1_tlb_flushID_SE	__P((u_int va));

void	sa1_cache_flushID	__P((void));
void	sa1_cache_flushI	__P((void));
void	sa1_cache_flushD	__P((void));
void	sa1_cache_flushD_SE	__P((u_int entry));

void	sa1_cache_cleanID	__P((void));
void	sa1_cache_cleanD	__P((void));
void	sa1_cache_cleanD_E	__P((u_int entry));

void	sa1_cache_purgeID	__P((void));
void	sa1_cache_purgeID_E	__P((u_int entry));
void	sa1_cache_purgeD	__P((void));
void	sa1_cache_purgeD_E	__P((u_int entry));

void	sa1_cache_syncI		__P((void));
void	sa1_cache_cleanID_rng	__P((vaddr_t start, vsize_t end));
void	sa1_cache_cleanD_rng	__P((vaddr_t start, vsize_t end));
void	sa1_cache_purgeID_rng	__P((vaddr_t start, vsize_t end));
void	sa1_cache_purgeD_rng	__P((vaddr_t start, vsize_t end));
void	sa1_cache_syncI_rng	__P((vaddr_t start, vsize_t end));
#endif

#ifdef CPU_ARM9
void	arm9_setttb		__P((u_int));

void	arm9_tlb_flushID_SE	__P((u_int va));

void	arm9_cache_flushID	__P((void));
void	arm9_cache_flushID_SE	__P((u_int));
void	arm9_cache_flushI	__P((void));
void	arm9_cache_flushI_SE	__P((u_int));
void	arm9_cache_flushD	__P((void));
void	arm9_cache_flushD_SE	__P((u_int));

void	arm9_cache_cleanID	__P((void));

void	arm9_cache_syncI	__P((void));
void	arm9_cache_flushID_rng	__P((vaddr_t, vsize_t));
void	arm9_cache_flushD_rng	__P((vaddr_t, vsize_t));
void	arm9_cache_syncI_rng	__P((vaddr_t, vsize_t));

void	arm9_context_switch	__P((void));

void	arm9_setup		__P((char *string));
#endif

#if defined(CPU_ARM9) || defined(CPU_SA110) || defined(CPU_SA1100) || \
    defined(CPU_SA1110) || defined(CPU_XSCALE_80200) || \
    defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_PXA2X0)

void	armv4_tlb_flushID	__P((void));
void	armv4_tlb_flushI	__P((void));
void	armv4_tlb_flushD	__P((void));
void	armv4_tlb_flushD_SE	__P((u_int va));

void	armv4_drain_writebuf	__P((void));
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(CPU_XSCALE_PXA2X0)
void	xscale_cpwait		__P((void));

void	xscale_cpu_sleep	__P((int mode));

u_int	xscale_control		__P((u_int clear, u_int bic));

void	xscale_setttb		__P((u_int ttb));

void	xscale_tlb_flushID_SE	__P((u_int va));

void	xscale_cache_flushID	__P((void));
void	xscale_cache_flushI	__P((void));
void	xscale_cache_flushD	__P((void));
void	xscale_cache_flushD_SE	__P((u_int entry));

void	xscale_cache_cleanID	__P((void));
void	xscale_cache_cleanD	__P((void));
void	xscale_cache_cleanD_E	__P((u_int entry));

void	xscale_cache_clean_minidata __P((void));

void	xscale_cache_purgeID	__P((void));
void	xscale_cache_purgeID_E	__P((u_int entry));
void	xscale_cache_purgeD	__P((void));
void	xscale_cache_purgeD_E	__P((u_int entry));

void	xscale_cache_syncI	__P((void));
void	xscale_cache_cleanID_rng __P((vaddr_t start, vsize_t end));
void	xscale_cache_cleanD_rng	__P((vaddr_t start, vsize_t end));
void	xscale_cache_purgeID_rng __P((vaddr_t start, vsize_t end));
void	xscale_cache_purgeD_rng	__P((vaddr_t start, vsize_t end));
void	xscale_cache_syncI_rng	__P((vaddr_t start, vsize_t end));

/* Used in write-through mode. */
void	xscale_cache_flushID_rng __P((vaddr_t start, vsize_t end));
void	xscale_cache_flushD_rng	__P((vaddr_t start, vsize_t end));
void	xscale_cache_flushI_rng	__P((vaddr_t start, vsize_t end));

void	xscale_context_switch	__P((void));

void	xscale_setup		__P((char *string));
#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 */

#define	tlb_flush	cpu_tlb_flushID
#define	setttb		cpu_setttb
#define	drain_writebuf	cpu_drain_writebuf

/*
 * Macros for manipulating CPU interrupts
 */
#ifdef __PROG32
#define	disable_interrupts(mask)					\
	(SetCPSR((mask) & (I32_bit | F32_bit), (mask) & (I32_bit | F32_bit)))

#define	enable_interrupts(mask)						\
	(SetCPSR((mask) & (I32_bit | F32_bit), 0))

#define	restore_interrupts(old_cpsr)					\
	(SetCPSR((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
#else /* ! __PROG32 */
#define	disable_interrupts(mask)					\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE),		\
		 (mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))

#define	enable_interrupts(mask)						\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE), 0))

#define	restore_interrupts(old_r15)					\
	(set_r15((R15_IRQ_DISABLE | R15_FIQ_DISABLE),			\
		 (old_r15) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))
#endif /* __PROG32 */

#ifdef __PROG32
/* Functions to manipulate the CPSR. */
u_int	SetCPSR(u_int bic, u_int eor);
u_int	GetCPSR(void);
#else
/* Functions to manipulate the processor control bits in r15. */
u_int	set_r15(u_int bic, u_int eor);
u_int	get_r15(void);
#endif /* __PROG32 */
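
/*
 * Illustrative example (not part of the original header): the usual
 * critical-section idiom built on the macros above saves the previous
 * interrupt state and restores it afterwards, which keeps nested use
 * safe.  The saved value is the old status word returned by SetCPSR()
 * (or set_r15() on 26-bit CPUs):
 *
 *	u_int savedints;
 *
 *	savedints = disable_interrupts(I32_bit);
 *	... code that must not be interrupted by IRQs ...
 *	restore_interrupts(savedints);
 *
 * Passing I32_bit | F32_bit (or the corresponding R15_*_DISABLE bits)
 * masks FIQs as well.
 */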
/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */

void	set_stackptr	__P((u_int mode, u_int address));
u_int	get_stackptr	__P((u_int mode));

/*
 * Miscellany
 */

int	get_pc_str_offset	__P((void));

/*
 * CPU functions from locore.S
 */

void	cpu_reset	__P((void)) __attribute__((__noreturn__));

/*
 * Cache info variables.
 */

/* PRIMARY CACHE VARIABLES */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;
int	arm_pcache_unified;

int	arm_dcache_align;
int	arm_dcache_align_mask;

#endif	/* _KERNEL */
#endif	/* _ARM32_CPUFUNC_H_ */

/* End of cpufunc.h */