/*	$OpenBSD: cpu.h,v 1.42 2024/11/06 18:59:09 miod Exp $	*/
/*	$NetBSD: cpu.h,v 1.41 2006/01/21 04:24:12 uwe Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	5.4 (Berkeley) 5/9/91
 */

/*
 * SH3/SH4 support.
 *
 *	T.Horiuchi Brains Corp. 5/22/98
 */

#ifndef _SH_CPU_H_
#define	_SH_CPU_H_

#include <sh/psl.h>
#include <sh/frame.h>

#ifdef _KERNEL

/*
 * Per-CPU information.
 */

#include <machine/intr.h>
#include <sys/clockintr.h>
#include <sys/sched.h>

struct cpu_info {
	struct proc *ci_curproc;

	struct schedstate_percpu ci_schedstate;	/* scheduler state */
	u_int32_t ci_randseed;
#ifdef DIAGNOSTIC
	int	ci_mutex_level;
#endif
#ifdef GPROF
	struct gmonparam *ci_gmon;
	struct clockintr ci_gmonclock;
#endif

	int	ci_want_resched;
	int	ci_idepth;

	struct clockqueue ci_queue;

	char	ci_panicbuf[512];
};

extern struct cpu_info cpu_info_store;
#define	curcpu()		(&cpu_info_store)
#define	cpu_number()		0
#define	CPU_IS_PRIMARY(ci)	1
#define	CPU_IS_RUNNING(ci)	1
#define	CPU_INFO_ITERATOR	int
#define	CPU_INFO_FOREACH(cii, ci) \
	for (cii = 0, ci = curcpu(); ci != NULL; ci = NULL)
#define	CPU_INFO_UNIT(ci)	0
#define	MAXCPUS			1
#define	cpu_unidle(ci)

#define	CPU_BUSY_CYCLE()	__asm volatile ("" ::: "memory")
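
/*
 * This is a uniprocessor port, so the accessors above collapse to
 * constants.  Illustrative sketch only (not part of this header): MI
 * code that iterates over CPUs runs its loop body exactly once, for
 * cpu_info_store:
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	CPU_INFO_FOREACH(cii, ci)
 *		printf("cpu%d\n", CPU_INFO_UNIT(ci));
 */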

/*
 * Arguments to clockintr_dispatch encapsulate the previous
 * machine state in an opaque clockframe.
 */
struct clockframe {
	int	ssr;	/* status register at time of interrupt */
	int	spc;	/* program counter at time of interrupt */
};

#define	CLKF_USERMODE(cf)	(!KERNELMODE((cf)->ssr))
#define	CLKF_PC(cf)		((cf)->spc)
#define	CLKF_INTR(cf)		(curcpu()->ci_idepth > 1)
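
/*
 * Illustrative sketch only (this handler is hypothetical): a clock
 * interrupt handler can classify what it interrupted with the macros
 * above:
 *
 *	void
 *	clock_intr(struct clockframe *cf)
 *	{
 *		if (CLKF_USERMODE(cf))
 *			;	(interrupted user mode)
 *		else if (CLKF_INTR(cf))
 *			;	(interrupted another interrupt handler)
 *	}
 */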

/*
 * This is used during profiling to integrate system time.  It can safely
 * assume that the process is resident.
 */
#define	PROC_PC(p)	((p)->p_md.md_regs->tf_spc)
#define	PROC_STACK(p)	((p)->p_md.md_regs->tf_r15)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
void	need_resched(struct cpu_info *);
#define	clear_resched(ci)	(ci)->ci_want_resched = 0

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  Request an AST to send us through trap,
 * marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	aston(p)

/*
 * Notify the current process (p) that it has a signal pending;
 * it will be processed as soon as possible.
 */
#define	signotify(p)	aston(p)

#define	aston(p)	((p)->p_md.md_astpending = 1)

/*
 * We need a machine-independent name for this.
 */
#define	DELAY(x)	delay(x)
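
/*
 * DELAY() takes a count in microseconds, so e.g. DELAY(1000) spins for
 * about one millisecond; _cpu_spin() below implements the delay loop.
 */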

#define	cpu_idle_enter()	do { /* nothing */ } while (0)
#define	cpu_idle_cycle()	__asm volatile("sleep")
#define	cpu_idle_leave()	do { /* nothing */ } while (0)

#endif /* _KERNEL */

/*
 * Logical address space of SH3/SH4 CPU.
 */
#define	SH3_PHYS_MASK	0x1fffffff

#define	SH3_P0SEG_BASE	0x00000000	/* TLB mapped, also U0SEG */
#define	SH3_P0SEG_END	0x7fffffff
#define	SH3_P1SEG_BASE	0x80000000	/* pa == va */
#define	SH3_P1SEG_END	0x9fffffff
#define	SH3_P2SEG_BASE	0xa0000000	/* pa == va, non-cacheable */
#define	SH3_P2SEG_END	0xbfffffff
#define	SH3_P3SEG_BASE	0xc0000000	/* TLB mapped, kernel mode */
#define	SH3_P3SEG_END	0xdfffffff
#define	SH3_P4SEG_BASE	0xe0000000	/* peripheral space */
#define	SH3_P4SEG_END	0xffffffff

#define	SH3_P1SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
#define	SH3_P2SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
#define	SH3_PHYS_TO_P1SEG(x)	((uint32_t)(x) | SH3_P1SEG_BASE)
#define	SH3_PHYS_TO_P2SEG(x)	((uint32_t)(x) | SH3_P2SEG_BASE)
#define	SH3_P1SEG_TO_P2SEG(x)	((uint32_t)(x) | 0x20000000)
#define	SH3_P2SEG_TO_P1SEG(x)	((uint32_t)(x) & ~0x20000000)
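
/*
 * Worked example (illustrative only): a kernel virtual address in P1
 * maps to its physical address by masking off the top three bits, and
 * to its uncached P2 alias by setting bit 29:
 *
 *	SH3_P1SEG_TO_PHYS(0x8c001000)	-> 0x0c001000
 *	SH3_PHYS_TO_P2SEG(0x0c001000)	-> 0xac001000
 *	SH3_P1SEG_TO_P2SEG(0x8c001000)	-> 0xac001000
 */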

#ifdef _KERNEL
#ifndef __lint__

/*
 * Switch from P1 (cached) to P2 (uncached).  This used to be written
 * using gcc's computed goto ("labels as values") extension, but gcc4's
 * aggressive optimizations tend to optimize that away under certain
 * circumstances.
 */
#define	RUN_P2 \
	do { \
		register uint32_t r0 asm("r0"); \
		uint32_t pc; \
		__asm volatile( \
			"	mov.l	1f, %1	;" \
			"	mova	2f, %0	;" \
			"	or	%0, %1	;" \
			"	jmp	@%1	;" \
			"	 nop		;" \
			"	.align 2	;" \
			"1:	.long	0x20000000;" \
			"2:;" \
			: "=r"(r0), "=r"(pc)); \
	} while (0)

/*
 * Switch from P2 (uncached) back to P1 (cached).  We need to be
 * running on P2 to access cache control registers, the memory-mapped
 * cache and TLB arrays, etc., and after touching them at least 8
 * instructions must execute before jumping to P1, so provide that
 * padding here.
 */
#define	RUN_P1 \
	do { \
		register uint32_t r0 asm("r0"); \
		uint32_t pc; \
		__asm volatile( \
		/*1*/	"	mov.l	1f, %1	;" \
		/*2*/	"	mova	2f, %0	;" \
		/*3*/	"	nop		;" \
		/*4*/	"	and	%0, %1	;" \
		/*5*/	"	nop		;" \
		/*6*/	"	nop		;" \
		/*7*/	"	nop		;" \
		/*8*/	"	nop		;" \
			"	jmp	@%1	;" \
			"	 nop		;" \
			"	.align 2	;" \
			"1:	.long	~0x20000000;" \
			"2:;" \
			: "=r"(r0), "=r"(pc)); \
	} while (0)

/*
 * If RUN_P1 is the last thing we do in a function we can omit it,
 * because we are going to return to a P1 caller anyway, but we still
 * need to ensure at least 8 instructions execute before the jump to P1.
 */
#define	PAD_P1_SWITCH	__asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop;")
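
/*
 * Illustrative pattern only (the address below is hypothetical): code
 * touching memory-mapped cache or TLB arrays jumps to P2 first, does
 * its accesses, then returns to P1 with the required padding:
 *
 *	RUN_P2;
 *	*(volatile uint32_t *)0xf0000000 = 0;	(hypothetical array entry)
 *	RUN_P1;		(or end the function with PAD_P1_SWITCH)
 */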

#else /* __lint__ */
#define	RUN_P2		do {} while (/* CONSTCOND */ 0)
#define	RUN_P1		do {} while (/* CONSTCOND */ 0)
#define	PAD_P1_SWITCH	do {} while (/* CONSTCOND */ 0)
#endif
#endif

#if defined(SH4)
/* SH4 Processor Version Register */
#define	SH4_PVR_ADDR	0xff000030	/* P4 address */
#define	SH4_PVR		(*(volatile uint32_t *) SH4_PVR_ADDR)
#define	SH4_PRR_ADDR	0xff000044	/* P4 address */
#define	SH4_PRR		(*(volatile uint32_t *) SH4_PRR_ADDR)

#define	SH4_PVR_MASK	0xffffff00
#define	SH4_PVR_SH7750	0x04020500	/* SH7750 */
#define	SH4_PVR_SH7750S	0x04020600	/* SH7750S */
#define	SH4_PVR_SH775xR	0x04050000	/* SH775xR */
#define	SH4_PVR_SH7751	0x04110000	/* SH7751 */

#define	SH4_PRR_MASK	0xfffffff0
#define	SH4_PRR_7750R	0x00000100	/* SH7750R */
#define	SH4_PRR_7751R	0x00000110	/* SH7751R */
#endif
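
/*
 * Illustrative sketch only: compare the version/revision registers
 * under their masks, since the masked-out low bits may vary by chip
 * cut:
 *
 *	if ((SH4_PVR & SH4_PVR_MASK) == SH4_PVR_SH7751)
 *		...				(plain SH7751)
 *	else if ((SH4_PRR & SH4_PRR_MASK) == SH4_PRR_7751R)
 *		...				(SH7751R)
 */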

/*
 * pull in #defines for kinds of processors
 */
#include <machine/cputypes.h>

#ifdef _KERNEL
void	sh_cpu_init(int, int);
void	sh_startup(void);
__dead void cpu_reset(void);	/* soft reset */
void	_cpu_spin(uint32_t);	/* for delay loop. */
void	delay(int);
struct pcb;
void	savectx(struct pcb *);
struct fpreg;
void	fpu_save(struct fpreg *);
void	fpu_restore(struct fpreg *);
u_int	cpu_dump(int (*)(dev_t, daddr_t, caddr_t, size_t), daddr_t *);
u_int	cpu_dumpsize(void);
void	dumpconf(void);
void	dumpsys(void);
unsigned int cpu_rnd_messybits(void);

static inline u_long
intr_disable(void)
{
	return (u_long)_cpu_intr_suspend();
}

static inline void
intr_restore(u_long s)
{
	_cpu_intr_resume((int)s);
}
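
/*
 * Illustrative sketch only: the usual critical-section pairing, where
 * the saved value must be restored rather than unconditionally
 * re-enabling interrupts:
 *
 *	u_long s;
 *
 *	s = intr_disable();
 *	...		(touch state shared with interrupt handlers)
 *	intr_restore(s);
 */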
#endif /* _KERNEL */
#endif /* !_SH_CPU_H_ */