/*	$NetBSD: cpu.h,v 1.61 2022/02/23 21:54:40 andvar Exp $	*/

/*-
 * Copyright (c) 2002, 2019 The NetBSD Foundation, Inc. All rights reserved.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	5.4 (Berkeley) 5/9/91
 */

/*
 * SH3/SH4 support.
 *
 *  T.Horiuchi  Brains Corp.  5/22/98
 */

#ifndef _SH3_CPU_H_
#define	_SH3_CPU_H_

#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#endif

#include <sh3/psl.h>
#include <sh3/frame.h>

#ifdef _KERNEL
#include <sys/cpu_data.h>
struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	cpuid_t ci_cpuid;
	int ci_mtx_count;
	int ci_mtx_oldspl;
	int ci_want_resched;
	int ci_idepth;
	struct lwp *ci_onproc;		/* current user LWP / kthread */
};

extern struct cpu_info cpu_info_store;
#define	curcpu()	(&cpu_info_store)

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_number()	0

#define	cpu_proc_fork(p1, p2)	/* nothing */

/*
 * Arguments to hardclock and gatherstats encapsulate the previous
 * machine state in an opaque clockframe.
 */
struct clockframe {
	int	spc;	/* program counter at time of interrupt */
	int	ssr;	/* status register at time of interrupt */
	int	ssp;	/* stack pointer at time of interrupt */
};

#define	CLKF_USERMODE(cf)	(!KERNELMODE((cf)->ssr))
#define	CLKF_PC(cf)		((cf)->spc)
#define	CLKF_INTR(cf)		(curcpu()->ci_idepth > 0)

/*
 * This is used during profiling to integrate system time.  It can safely
 * assume that the process is resident.
 */
#define	LWP_PC(l)	(((struct trapframe *)(l)->l_md.md_regs)->tf_spc)

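/*
 * Illustrative sketch of how the clockframe macros above are meant to
 * be consumed by machine-independent clock code.  The function name is
 * hypothetical and the block is kept out of compilation.
 */
#if 0	/* example only */
static void
example_clock_tick(struct clockframe *cf)
{

	if (CLKF_USERMODE(cf)) {
		/* the tick interrupted user mode; charge user time */
	} else if (CLKF_INTR(cf)) {
		/* taken while already in interrupt context (nested) */
	} else {
		/* kernel mode; CLKF_PC(cf) is the interrupted kernel PC */
	}
}
#endif
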
/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
#define	cpu_need_resched(ci,l,flags)					\
do {									\
	if ((flags & RESCHED_IDLE) == 0)				\
		aston(curlwp);						\
} while (/*CONSTCOND*/0)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  Request an AST to send us through trap,
 * marking the LWP as needing a profiling tick.
 */
#define	cpu_need_proftick(l)						\
do {									\
	(l)->l_pflag |= LP_OWEUPC;					\
	aston(l);							\
} while (/*CONSTCOND*/0)

/*
 * Notify the LWP (l) that it has a signal pending; it will be
 * processed as soon as possible.
 */
#define	cpu_signotify(l)	aston(l)

#define	aston(l)	((l)->l_md.md_astpending = 1)

/*
 * We need a machine-independent name for this.
 */
#define	DELAY(x)	delay(x)
#endif /* _KERNEL */

/*
 * Logical address space of SH3/SH4 CPU.
 */
#define	SH3_PHYS_MASK	0x1fffffff

#define	SH3_P0SEG_BASE	0x00000000	/* TLB mapped, also U0SEG */
#define	SH3_P0SEG_END	0x7fffffff
#define	SH3_P1SEG_BASE	0x80000000	/* pa == va */
#define	SH3_P1SEG_END	0x9fffffff
#define	SH3_P2SEG_BASE	0xa0000000	/* pa == va, non-cacheable */
#define	SH3_P2SEG_END	0xbfffffff
#define	SH3_P3SEG_BASE	0xc0000000	/* TLB mapped, kernel mode */
#define	SH3_P3SEG_END	0xdfffffff
#define	SH3_P4SEG_BASE	0xe0000000	/* peripheral space */
#define	SH3_P4SEG_END	0xffffffff

#define	SH3_P1SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
#define	SH3_P2SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
#define	SH3_PHYS_TO_P1SEG(x)	((uint32_t)(x) | SH3_P1SEG_BASE)
#define	SH3_PHYS_TO_P2SEG(x)	((uint32_t)(x) | SH3_P2SEG_BASE)
#define	SH3_P1SEG_TO_P2SEG(x)	((uint32_t)(x) + 0x20000000u)
#define	SH3_P2SEG_TO_P1SEG(x)	((uint32_t)(x) - 0x20000000u)

#ifdef __GNUC__
#define	SH3_P2SEG_FUNC(f)	((__typeof__(f) *)SH3_P1SEG_TO_P2SEG(f))
#else
#define	SH3_P2SEG_FUNC(f)	((void *)SH3_P1SEG_TO_P2SEG(f))
#endif

#ifndef __lint__

/*
 * Switch from P1 (cached) to P2 (uncached).  This used to be written
 * using gcc's assigned goto extension, but gcc4 aggressive optimizations
 * tend to optimize that away under certain circumstances.
 */
#define	RUN_P2							\
	do {							\
		register uint32_t r0 asm("r0");			\
		uint32_t pc;					\
		__asm volatile(					\
			"	mov.l	1f, %1	;"		\
			"	mova	2f, %0	;"		\
			"	or	%0, %1	;"		\
			"	jmp	@%1	;"		\
			"	 nop		;"		\
			"	.align 2	;"		\
			"1:	.long	0x20000000;"		\
			"2:;"					\
			: "=r"(r0), "=r"(pc));			\
	} while (0)

/*
 * Switch from P2 (uncached) back to P1 (cached).  We need to be
 * running on P2 to access cache control, memory-mapped cache and TLB
 * arrays, etc., and after touching them at least 8 instructions are
 * necessary before jumping to P1, so provide that padding here.
 */
#define	RUN_P1							\
	do {							\
		register uint32_t r0 asm("r0");			\
		uint32_t pc;					\
		__asm volatile(					\
		/*1*/	"	mov.l	1f, %1	;"		\
		/*2*/	"	mova	2f, %0	;"		\
		/*3*/	"	nop		;"		\
		/*4*/	"	and	%0, %1	;"		\
		/*5*/	"	nop		;"		\
		/*6*/	"	nop		;"		\
		/*7*/	"	nop		;"		\
		/*8*/	"	nop		;"		\
			"	jmp	@%1	;"		\
			"	 nop		;"		\
			"	.align 2	;"		\
			"1:	.long	~0x20000000;"		\
			"2:;"					\
			: "=r"(r0), "=r"(pc));			\
	} while (0)

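/*
 * Illustrative sketch of the intended usage pattern for the macros
 * above: jump to P2 before poking memory-mapped cache/TLB arrays, do
 * the uncached accesses, then return to cached P1 execution.  The
 * register address and function name below are hypothetical and the
 * block is not compiled.
 */
#if 0	/* example only */
#define	EXAMPLE_CTRL_REG	(*(volatile uint32_t *)0xf0000000)	/* hypothetical */

static void
example_uncached_poke(void)
{

	RUN_P2;				/* now executing uncached via P2 */
	EXAMPLE_CTRL_REG |= 1;		/* touch a memory-mapped array/register */
	RUN_P1;				/* back to cached execution via P1 */
}
#endif
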
/*
 * If RUN_P1 is the last thing we do in a function we can omit it,
 * because we are going to return to a P1 caller anyway, but we still
 * need to ensure there are at least 8 instructions before the jump
 * back to P1.
 */
#define	PAD_P1_SWITCH	__asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop;")

#else  /* __lint__ */
#define	RUN_P2		do {} while (/* CONSTCOND */ 0)
#define	RUN_P1		do {} while (/* CONSTCOND */ 0)
#define	PAD_P1_SWITCH	do {} while (/* CONSTCOND */ 0)
#endif

#if defined(SH4)
/* SH4 Processor Version Register */
#define	SH4_PVR_ADDR	0xff000030	/* P4 address */
#define	SH4_PVR		(*(volatile uint32_t *) SH4_PVR_ADDR)
#define	SH4_PRR_ADDR	0xff000044	/* P4 address */
#define	SH4_PRR		(*(volatile uint32_t *) SH4_PRR_ADDR)

#define	SH4_PVR_MASK	0xffffff00
#define	SH4_PVR_SH7750	0x04020500	/* SH7750 */
#define	SH4_PVR_SH7750S	0x04020600	/* SH7750S */
#define	SH4_PVR_SH775xR	0x04050000	/* SH775xR */
#define	SH4_PVR_SH7751	0x04110000	/* SH7751 */

#define	SH4_PRR_MASK	0xfffffff0
#define	SH4_PRR_7750R	0x00000100	/* SH7750R */
#define	SH4_PRR_7751R	0x00000110	/* SH7751R */
#endif

/*
 * pull in #defines for kinds of processors
 */
#include <machine/cputypes.h>

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_CONSDEV		1	/* dev_t: console terminal device */
#define	CPU_LOADANDRESET	2	/* load kernel image and reset */

#ifdef _KERNEL
void sh_cpu_init(int, int);
void sh_startup(void);
void cpu_reset(void) __attribute__((__noreturn__));	/* soft reset */
void _cpu_spin(uint32_t);	/* for delay loop. */
void delay(int);
struct pcb;
void savectx(struct pcb *);
void dumpsys(void);
#endif /* _KERNEL */
#endif /* !_SH3_CPU_H_ */
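
/*
 * Illustrative note on the SH4 version registers above: model
 * detection generally masks PVR/PRR and compares the result against
 * the model constants, roughly
 *
 *	if ((SH4_PVR & SH4_PVR_MASK) == SH4_PVR_SH7751)
 *		... SH7751 ...
 *	else if ((SH4_PRR & SH4_PRR_MASK) == SH4_PRR_7751R)
 *		... SH7751R ...
 *
 * This is only a sketch of how the constants are intended to be used,
 * not the exact identification logic.
 */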