/*	$OpenBSD: cpu.h,v 1.130 2020/07/11 15:18:08 visa Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell and Rick Macklem.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (C) 1989 Digital Equipment Corporation.
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies.
 * Digital Equipment Corporation makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 *	from: @(#)cpu.h	8.4 (Berkeley) 1/4/94
 */

#ifndef _MIPS64_CPU_H_
#define	_MIPS64_CPU_H_

#ifndef _LOCORE

/*
 * MIPS32-style segment definitions.
 * They only cover the first 512MB of physical addresses.
 */
#define	CKSEG0_BASE		0xffffffff80000000UL
#define	CKSEG1_BASE		0xffffffffa0000000UL
#define	CKSSEG_BASE		0xffffffffc0000000UL
#define	CKSEG3_BASE		0xffffffffe0000000UL
#define	CKSEG_SIZE		0x0000000020000000UL

#define	CKSEG0_TO_PHYS(x)	((u_long)(x) & (CKSEG_SIZE - 1))
#define	CKSEG1_TO_PHYS(x)	((u_long)(x) & (CKSEG_SIZE - 1))
#define	PHYS_TO_CKSEG0(x)	((u_long)(x) | CKSEG0_BASE)
#define	PHYS_TO_CKSEG1(x)	((u_long)(x) | CKSEG1_BASE)
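
/*
 * Usage sketch (illustration only, compiled out): the CKSEG conversions are
 * plain mask/merge operations, so any physical address below 512MB
 * round-trips through either compatibility segment.  The sample address is
 * made up.
 */
#if 0
static void
ckseg_example(void)
{
	u_long pa = 0x00200000UL;		/* hypothetical physical address */
	u_long va0 = PHYS_TO_CKSEG0(pa);	/* 0xffffffff80200000, cached */
	u_long va1 = PHYS_TO_CKSEG1(pa);	/* 0xffffffffa0200000, uncached */

	/* both segments resolve back to the same physical address: */
	/* CKSEG0_TO_PHYS(va0) == pa, CKSEG1_TO_PHYS(va1) == pa */
}
#endif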

/*
 * MIPS64-style segment definitions.
 * These allow for 36 bits of addressable physical memory, thus 64GB.
 */

/*
 * Cache Coherency Attributes.
 */
/* r8k only */
#define	CCA_NC_COPROCESSOR	0UL	/* uncached, coprocessor ordered */
/* common to r4, r5k, r8k and r1xk */
#define	CCA_NC			2UL	/* uncached, write-around */
#define	CCA_NONCOHERENT		3UL	/* cached, non-coherent, write-back */
/* r8k, r1xk only */
#define	CCA_COHERENT_EXCL	4UL	/* cached, coherent, exclusive */
#define	CCA_COHERENT_EXCLWRITE	5UL	/* cached, coherent, exclusive write */
/* r4k only */
#define	CCA_COHERENT_UPDWRITE	6UL	/* cached, coherent, update on write */
/* r1xk only */
#define	CCA_NC_ACCELERATED	7UL	/* uncached accelerated */

#ifdef TGT_COHERENT
#define	CCA_CACHED	CCA_COHERENT_EXCLWRITE
#else
#define	CCA_CACHED	CCA_NONCOHERENT
#endif

/*
 * Uncached spaces.
 * R1x000 processors use bits 58:57 of uncached virtual addresses (CCA_NC)
 * to select different spaces.  Unfortunately, other processors need these
 * bits to be zero, so uncached addresses have to be decided at runtime.
 */
#define	SP_HUB		0UL	/* Hub space */
#define	SP_IO		1UL	/* I/O space */
#define	SP_SPECIAL	2UL	/* Memory Special space */
#define	SP_NC		3UL	/* Memory Uncached space */

#define	XKSSSEG_BASE		0x4000000000000000UL
#define	XKPHYS_BASE		0x8000000000000000UL
#define	XKSSEG_BASE		0xc000000000000000UL

#define	XKPHYS_TO_PHYS(x)	((paddr_t)(x) & 0x0000000fffffffffUL)
#define	PHYS_TO_XKPHYS(x,c)	((paddr_t)(x) | XKPHYS_BASE | ((c) << 59))
#define	PHYS_TO_XKPHYS_UNCACHED(x,s) \
	(PHYS_TO_XKPHYS(x, CCA_NC) | ((s) << 57))
#define	IS_XKPHYS(va)		(((va) >> 62) == 2)
#define	XKPHYS_TO_CCA(x)	(((x) >> 59) & 0x07)
#define	XKPHYS_TO_SP(x)		(((x) >> 57) & 0x03)
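
/*
 * Usage sketch (illustration only, compiled out): building an uncached
 * XKPHYS mapping and decoding its attribute bits with the macros above.
 * The physical address is made up.
 */
#if 0
static void
xkphys_example(void)
{
	paddr_t pa = 0x1f000000UL;	/* hypothetical device registers */
	vaddr_t va = PHYS_TO_XKPHYS_UNCACHED(pa, SP_IO);

	/*
	 * va has bits 63:62 = 2 (XKPHYS), CCA_NC in bits 61:59 and SP_IO
	 * in bits 58:57, so:
	 *	IS_XKPHYS(va)      != 0
	 *	XKPHYS_TO_CCA(va)  == CCA_NC
	 *	XKPHYS_TO_SP(va)   == SP_IO
	 *	XKPHYS_TO_PHYS(va) == pa
	 */
}
#endif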

#endif	/* _LOCORE */

/*
 * Exported definitions unique to mips cpu support.
 */

#if defined(_KERNEL) && !defined(_LOCORE)

#include <sys/device.h>
#include <machine/intr.h>
#include <sys/sched.h>
#include <sys/srp.h>

struct cpu_hwinfo {
	uint32_t	c0prid;
	uint32_t	c1prid;
	uint32_t	clock;		/* Hz */
	uint32_t	tlbsize;
	uint		type;
	uint32_t	l2size;
};

/*
 * Cache memory configuration.  One struct per cache.
 */
struct cache_info {
	uint		size;		/* total cache size */
	uint		linesize;	/* line size */
	uint		setsize;	/* set size */
	uint		sets;		/* number of sets */
};

struct cpu_info {
	struct device	*ci_dev;	/* our device */
	struct cpu_info	*ci_self;	/* pointer to this structure */
	struct cpu_info	*ci_next;	/* next cpu */
	struct proc	*ci_curproc;
	struct user	*ci_curprocpaddr;
	struct proc	*ci_fpuproc;	/* pointer to last proc to use FP */
	uint32_t	 ci_delayconst;
	struct cpu_hwinfo
			ci_hw;

#if defined(MULTIPROCESSOR)
	struct srp_hazard ci_srp_hazards[SRP_HAZARD_NUM];
#endif

	/* cache information and pending flush state */
	uint		ci_cacheconfiguration;
	uint64_t	ci_cachepending_l1i;
	struct cache_info
			ci_l1inst,
			ci_l1data,
			ci_l2,
			ci_l3;

	/* function pointers for the cache handling routines */
	void		(*ci_SyncCache)(struct cpu_info *);
	void		(*ci_InvalidateICache)(struct cpu_info *, vaddr_t,
			    size_t);
	void		(*ci_InvalidateICachePage)(struct cpu_info *, vaddr_t);
	void		(*ci_SyncICache)(struct cpu_info *);
	void		(*ci_SyncDCachePage)(struct cpu_info *, vaddr_t,
			    paddr_t);
	void		(*ci_HitSyncDCachePage)(struct cpu_info *, vaddr_t,
			    paddr_t);
	void		(*ci_HitSyncDCache)(struct cpu_info *, vaddr_t, size_t);
	void		(*ci_HitInvalidateDCache)(struct cpu_info *, vaddr_t,
			    size_t);
	void		(*ci_IOSyncDCache)(struct cpu_info *, vaddr_t, size_t,
			    int);

	struct schedstate_percpu
			ci_schedstate;
	int		ci_want_resched;	/* need_resched() invoked */
	cpuid_t		ci_cpuid;		/* our CPU ID */
	uint32_t	ci_randseed;		/* per cpu random seed */
	volatile int	ci_ipl;			/* software IPL */
	uint32_t	ci_softpending;		/* pending soft interrupts */
	int		ci_clock_started;
	u_int32_t	ci_cpu_counter_last;	/* last compare value loaded */
	u_int32_t	ci_cpu_counter_interval; /* # of counter ticks/tick */

	u_int32_t	ci_pendingticks;

#ifdef TGT_ORIGIN
	u_int16_t	ci_nasid;
	u_int16_t	ci_slice;
#endif

	struct pmap	*ci_curpmap;
	uint		ci_intrdepth;		/* interrupt depth */
#ifdef MULTIPROCESSOR
	u_long		ci_flags;		/* flags; see below */
#endif
	volatile int	ci_ddb;
#define	CI_DDB_RUNNING		0
#define	CI_DDB_SHOULDSTOP	1
#define	CI_DDB_STOPPED		2
#define	CI_DDB_ENTERDDB		3
#define	CI_DDB_INDDB		4

#ifdef DIAGNOSTIC
	int		ci_mutex_level;
#endif
#ifdef GPROF
	struct gmonparam *ci_gmon;
#endif
};

#define	CPUF_PRIMARY	0x01		/* CPU is primary CPU */
#define	CPUF_PRESENT	0x02		/* CPU is present */
#define	CPUF_RUNNING	0x04		/* CPU is running */

extern struct cpu_info cpu_info_primary;
extern struct cpu_info *cpu_info_list;
#define	CPU_INFO_ITERATOR		int
#define	CPU_INFO_FOREACH(cii, ci)	for (cii = 0, ci = cpu_info_list; \
					    ci != NULL; ci = ci->ci_next)
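
/*
 * Usage sketch (illustration only, compiled out): the usual way to walk
 * every attached CPU with the iterator above.
 */
#if 0
static void
cpu_iterate_example(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	CPU_INFO_FOREACH(cii, ci) {
		/* ci points at each cpu_info in turn */
	}
}
#endif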

#define	CPU_INFO_UNIT(ci)	((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)

extern void (*cpu_idle_cycle_func)(void);
#define	cpu_idle_cycle()	(*cpu_idle_cycle_func)()

#ifdef MULTIPROCESSOR
#define	getcurcpu()		hw_getcurcpu()
#define	setcurcpu(ci)		hw_setcurcpu(ci)
extern struct cpu_info *get_cpu_info(int);
#define	curcpu()		getcurcpu()
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)
#define	cpu_number()		(curcpu()->ci_cpuid)

extern struct cpuset cpus_running;
void	cpu_unidle(struct cpu_info *);
void	cpu_boot_secondary_processors(void);
#define	cpu_boot_secondary(ci)	hw_cpu_boot_secondary(ci)
#define	cpu_hatch(ci)		hw_cpu_hatch(ci)

vaddr_t	alloc_contiguous_pages(size_t);

#define	MIPS64_IPI_NOP		0x00000001
#define	MIPS64_IPI_RENDEZVOUS	0x00000002
#define	MIPS64_IPI_DDB		0x00000004
#define	MIPS64_NIPIS		3	/* must not exceed 32 */

void	mips64_ipi_init(void);
void	mips64_send_ipi(unsigned int, unsigned int);
void	smp_rendezvous_cpus(unsigned long, void (*)(void *), void *arg);

#include <sys/mplock.h>
#else
#define	MAXCPUS			1
#define	curcpu()		(&cpu_info_primary)
#define	CPU_IS_PRIMARY(ci)	1
#define	cpu_number()		0UL
#define	cpu_unidle(ci)
#define	get_cpu_info(i)		(&cpu_info_primary)
#endif

#define	CPU_BUSY_CYCLE()	do {} while (0)

extern void (*md_startclock)(struct cpu_info *);
void	cp0_calibrate(struct cpu_info *);

unsigned int cpu_rnd_messybits(void);

#include <machine/frame.h>

/*
 * Arguments to hardclock encapsulate the previous machine state in
 * an opaque clockframe.
 */
#define	clockframe trapframe	/* Use normal trap frame */

#define	SR_KSU_USER		0x00000010
#define	CLKF_USERMODE(framep)	((framep)->sr & SR_KSU_USER)
#define	CLKF_PC(framep)		((framep)->pc)
#define	CLKF_INTR(framep)	(curcpu()->ci_intrdepth > 1)	/* XXX */

/*
 * This is used during profiling to integrate system time.
 */
#define	PROC_PC(p)	((p)->p_md.md_regs->pc)
#define	PROC_STACK(p)	((p)->p_md.md_regs->sp)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
void	need_resched(struct cpu_info *);
#define	clear_resched(ci)	(ci)->ci_want_resched = 0

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On MIPS designs, request an ast to send us
 * through trap, marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	aston(p)

/*
 * Notify the current process (p) that it has a signal pending; process it
 * as soon as possible.
 */
void	signotify(struct proc *);

#define	aston(p)		((p)->p_md.md_astpending = 1)

#ifdef CPU_R8000
#define	mips_sync()	__asm__ volatile ("lw $0, 0(%0)" :: \
			    "r" (PHYS_TO_XKPHYS(0, CCA_NC)) : "memory")
#else
#define	mips_sync()	__asm__ volatile ("sync" ::: "memory")
#endif

#endif	/* _KERNEL && !_LOCORE */

#ifdef _KERNEL
/*
 * Values for the code field in a break instruction.
 */
#define	BREAK_INSTR		0x0000000d
#define	BREAK_VAL_MASK		0x03ff0000
#define	BREAK_VAL_SHIFT		16
#define	BREAK_KDB_VAL		512
#define	BREAK_SSTEP_VAL		513
#define	BREAK_BRKPT_VAL		514
#define	BREAK_SOVER_VAL		515
#define	BREAK_DDB_VAL		516
#define	BREAK_FPUEMUL_VAL	517
#define	BREAK_KDB	(BREAK_INSTR | (BREAK_KDB_VAL << BREAK_VAL_SHIFT))
#define	BREAK_SSTEP	(BREAK_INSTR | (BREAK_SSTEP_VAL << BREAK_VAL_SHIFT))
#define	BREAK_BRKPT	(BREAK_INSTR | (BREAK_BRKPT_VAL << BREAK_VAL_SHIFT))
#define	BREAK_SOVER	(BREAK_INSTR | (BREAK_SOVER_VAL << BREAK_VAL_SHIFT))
#define	BREAK_DDB	(BREAK_INSTR | (BREAK_DDB_VAL << BREAK_VAL_SHIFT))
#define	BREAK_FPUEMUL	(BREAK_INSTR | (BREAK_FPUEMUL_VAL << BREAK_VAL_SHIFT))
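
/*
 * Usage sketch (illustration only, compiled out): given a fetched break
 * instruction word, the code field is recovered with the mask and shift
 * above and compared against the BREAK_*_VAL constants.
 */
#if 0
static int
break_is_sstep(uint32_t insn)
{
	u_int code = (insn & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT;

	return code == BREAK_SSTEP_VAL;
}
#endif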

#endif	/* _KERNEL */

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_ALLOWAPERTURE	1	/* allow mmap of /dev/xf86 */
		/*		2	formerly: keyboard reset */
		/*		3	formerly: CPU_LIDSUSPEND */
#define	CPU_LIDACTION		4	/* action caused by lid close */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES {			\
	{ 0, 0 },				\
	{ "allowaperture", CTLTYPE_INT },	\
	{ 0, 0 },				\
	{ 0, 0 },				\
	{ "lidaction", CTLTYPE_INT },		\
}

/*
 * MIPS CPU types (cp_imp).
 */
#define	MIPS_R2000	0x01	/* MIPS R2000 CPU		ISA I	*/
#define	MIPS_R3000	0x02	/* MIPS R3000 CPU		ISA I	*/
#define	MIPS_R6000	0x03	/* MIPS R6000 CPU		ISA II	*/
#define	MIPS_R4000	0x04	/* MIPS R4000/4400 CPU		ISA III	*/
#define	MIPS_R3LSI	0x05	/* LSI Logic R3000 derivative	ISA I	*/
#define	MIPS_R6000A	0x06	/* MIPS R6000A CPU		ISA II	*/
#define	MIPS_CN50XX	0x06	/* Cavium OCTEON CN50xx		MIPS64R2 */
#define	MIPS_R3IDT	0x07	/* IDT R3000 derivative		ISA I	*/
#define	MIPS_R10000	0x09	/* MIPS R10000/T5 CPU		ISA IV	*/
#define	MIPS_R4200	0x0a	/* MIPS R4200 CPU (ICE)		ISA III	*/
#define	MIPS_R4300	0x0b	/* NEC VR4300 CPU		ISA III	*/
#define	MIPS_R4100	0x0c	/* NEC VR41xx CPU MIPS-16	ISA III	*/
#define	MIPS_R12000	0x0e	/* MIPS R12000			ISA IV	*/
#define	MIPS_R14000	0x0f	/* MIPS R14000			ISA IV	*/
#define	MIPS_R8000	0x10	/* MIPS R8000 Blackbird/TFP	ISA IV	*/
#define	MIPS_R4600	0x20	/* PMCS R4600 Orion		ISA III	*/
#define	MIPS_R4700	0x21	/* PMCS R4700 Orion		ISA III	*/
#define	MIPS_R3TOSH	0x22	/* Toshiba R3000 based CPU	ISA I	*/
#define	MIPS_R5000	0x23	/* MIPS R5000 CPU		ISA IV	*/
#define	MIPS_RM7000	0x27	/* PMCS RM7000 CPU		ISA IV	*/
#define	MIPS_RM52X0	0x28	/* PMCS RM52X0 CPU		ISA IV	*/
#define	MIPS_RM9000	0x34	/* PMCS RM9000 CPU		ISA IV	*/
#define	MIPS_LOONGSON	0x42	/* STC LoongSon CPU		ISA III	*/
#define	MIPS_VR5400	0x54	/* NEC Vr5400 CPU		ISA IV+	*/
#define	MIPS_LOONGSON2	0x63	/* STC LoongSon2/3 CPU		ISA III+ */
#define	MIPS_CN63XX	0x90	/* Cavium OCTEON II CN6[23]xx	MIPS64R2 */
#define	MIPS_CN68XX	0x91	/* Cavium OCTEON II CN68xx	MIPS64R2 */
#define	MIPS_CN66XX	0x92	/* Cavium OCTEON II CN66xx	MIPS64R2 */
#define	MIPS_CN61XX	0x93	/* Cavium OCTEON II CN6[01]xx	MIPS64R2 */
#define	MIPS_CN78XX	0x95	/* Cavium OCTEON III CN7[678]xx	MIPS64R2 */
#define	MIPS_CN71XX	0x96	/* Cavium OCTEON III CN7[01]xx	MIPS64R2 */
#define	MIPS_CN73XX	0x97	/* Cavium OCTEON III CN7[23]xx	MIPS64R2 */

/*
 * MIPS FPU types.  Only soft is defined here; the rest are the same as the
 * CPU types.
 */
#define	MIPS_SOFT	0x00	/* Software emulation		ISA I	*/


#if defined(_KERNEL) && !defined(_LOCORE)

extern register_t protosr;
extern int cpu_has_synced_cp0_count;
extern int cpu_has_userlocal;

#ifdef FPUEMUL
#define	CPU_HAS_FPU(ci)	((ci)->ci_hw.c1prid != 0)
#else
#define	CPU_HAS_FPU(ci)	1
#endif

struct exec_package;
struct user;

void	tlb_asid_wrap(struct cpu_info *);
void	tlb_flush(int);
void	tlb_flush_addr(vaddr_t);
void	tlb_init(unsigned int);
int64_t	tlb_probe(vaddr_t);
void	tlb_set_gbase(vaddr_t, vsize_t);
void	tlb_set_page_mask(uint32_t);
void	tlb_set_pid(u_int);
void	tlb_set_wired(uint32_t);
int	tlb_update(vaddr_t, register_t);
void	tlb_update_indexed(vaddr_t, register_t, register_t, uint);

void	build_trampoline(vaddr_t, vaddr_t);
void	cpu_switchto_asm(struct proc *, struct proc *);
int	exec_md_map(struct proc *, struct exec_package *);
void	savectx(struct user *, int);

void	enable_fpu(struct proc *);
void	save_fpu(void);
int	fpe_branch_emulate(struct proc *, struct trapframe *, uint32_t,
	    vaddr_t);
void	MipsSaveCurFPState(struct proc *);
void	MipsSaveCurFPState16(struct proc *);
void	MipsSwitchFPState(struct proc *, struct trapframe *);
void	MipsSwitchFPState16(struct proc *, struct trapframe *);

int	guarded_read_1(paddr_t, uint8_t *);
int	guarded_read_2(paddr_t, uint16_t *);
int	guarded_read_4(paddr_t, uint32_t *);
int	guarded_write_4(paddr_t, uint32_t);

void	MipsFPTrap(struct trapframe *);
register_t MipsEmulateBranch(struct trapframe *, vaddr_t, uint32_t, uint32_t);

int	classify_insn(uint32_t);
#define	INSNCLASS_NEUTRAL	0
#define	INSNCLASS_CALL		1
#define	INSNCLASS_BRANCH	2

/*
 * R4000 end-of-page errata workaround routines
 */

extern int r4000_errata;
u_int	eop_page_check(paddr_t);
void	eop_tlb_flush_addr(struct pmap *, vaddr_t, u_long);
int	eop_tlb_miss_handler(struct trapframe *, struct cpu_info *,
	    struct proc *);
void	eop_cleanup(struct trapframe *, struct proc *);

/*
 * Low level access routines to CPU registers
 */

void	setsoftintr0(void);
void	clearsoftintr0(void);
void	setsoftintr1(void);
void	clearsoftintr1(void);
register_t enableintr(void);
register_t disableintr(void);
register_t getsr(void);
register_t setsr(register_t);

u_int	cp0_get_count(void);
register_t cp0_get_config(void);
uint32_t cp0_get_config_1(void);
uint32_t cp0_get_config_2(void);
uint32_t cp0_get_config_3(void);
uint32_t cp0_get_config_4(void);
uint32_t cp0_get_pagegrain(void);
register_t cp0_get_prid(void);
void	cp0_reset_cause(register_t);
void	cp0_set_compare(u_int);
void	cp0_set_config(register_t);
void	cp0_set_pagegrain(uint32_t);
void	cp0_set_trapbase(register_t);
u_int	cp1_get_prid(void);

static inline uint32_t
cp0_get_hwrena(void)
{
	uint32_t value;
	__asm__ volatile ("mfc0 %0, $7" : "=r" (value));
	return value;
}

static inline void
cp0_set_hwrena(uint32_t value)
{
	__asm__ volatile ("mtc0 %0, $7" : : "r" (value));
}

static inline void
cp0_set_userlocal(void *value)
{
	__asm__ volatile (
	"	.set	push\n"
	"	.set	mips64r2\n"
	"	dmtc0	%0, $4, 2\n"
	"	.set	pop\n"
	: : "r" (value));
}

static inline u_long
intr_disable(void)
{
	return disableintr();
}

static inline void
intr_restore(u_long sr)
{
	setsr(sr);
}
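
/*
 * Usage sketch (illustration only, compiled out): the usual pairing of the
 * two helpers above around a short critical section.
 */
#if 0
static void
intr_example(void)
{
	u_long s;

	s = intr_disable();	/* mask interrupts, keep the old SR value */
	/* ... code that must not be interrupted ... */
	intr_restore(s);	/* restore the saved SR value */
}
#endif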

/*
 * Cache routines (may be overridden)
 */

#ifndef	Mips_SyncCache
#define	Mips_SyncCache(ci) \
	((ci)->ci_SyncCache)(ci)
#endif
#ifndef	Mips_InvalidateICache
#define	Mips_InvalidateICache(ci, va, l) \
	((ci)->ci_InvalidateICache)(ci, va, l)
#endif
#ifndef	Mips_InvalidateICachePage
#define	Mips_InvalidateICachePage(ci, va) \
	((ci)->ci_InvalidateICachePage)(ci, va)
#endif
#ifndef	Mips_SyncICache
#define	Mips_SyncICache(ci) \
	((ci)->ci_SyncICache)(ci)
#endif
#ifndef	Mips_SyncDCachePage
#define	Mips_SyncDCachePage(ci, va, pa) \
	((ci)->ci_SyncDCachePage)(ci, va, pa)
#endif
#ifndef	Mips_HitSyncDCachePage
#define	Mips_HitSyncDCachePage(ci, va, pa) \
	((ci)->ci_HitSyncDCachePage)(ci, va, pa)
#endif
#ifndef	Mips_HitSyncDCache
#define	Mips_HitSyncDCache(ci, va, l) \
	((ci)->ci_HitSyncDCache)(ci, va, l)
#endif
#ifndef	Mips_HitInvalidateDCache
#define	Mips_HitInvalidateDCache(ci, va, l) \
	((ci)->ci_HitInvalidateDCache)(ci, va, l)
#endif
#ifndef	Mips_IOSyncDCache
#define	Mips_IOSyncDCache(ci, va, l, h) \
	((ci)->ci_IOSyncDCache)(ci, va, l, h)
#endif

#endif	/* _KERNEL && !_LOCORE */
#endif	/* !_MIPS64_CPU_H_ */