xref: /openbsd/sys/arch/powerpc/include/cpu.h (revision 8932bfb7)
1 /*	$OpenBSD: cpu.h,v 1.46 2010/09/28 20:27:55 miod Exp $	*/
2 /*	$NetBSD: cpu.h,v 1.1 1996/09/30 16:34:21 ws Exp $	*/
3 
4 /*
5  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
6  * Copyright (C) 1995, 1996 TooLs GmbH.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by TooLs GmbH.
20  * 4. The name of TooLs GmbH may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 #ifndef	_POWERPC_CPU_H_
35 #define	_POWERPC_CPU_H_
36 
37 #include <machine/frame.h>
38 
39 #include <sys/device.h>
40 #include <sys/lock.h>
41 #include <sys/sched.h>
42 
struct cpu_info {
	struct device *ci_dev;		/* our device */
	struct schedstate_percpu ci_schedstate; /* scheduler state */

	struct proc *ci_curproc;	/* currently running process */

	struct pcb *ci_curpcb;		/* current process' pcb */
	struct pmap *ci_curpm;		/* current pmap */
	struct proc *ci_fpuproc;	/* process owning the FPU state */
	struct proc *ci_vecproc;	/* process owning the vector unit state */
	int ci_cpuid;			/* CPU id; 0 on the primary (see CPU_IS_PRIMARY) */

	volatile int ci_want_resched;	/* preemption requested (need_resched) */
	volatile int ci_cpl;		/* current interrupt priority level */
	volatile int ci_iactive;	/* interrupt dispatch state, flags below */
#define		CI_IACTIVE_PROCESSING_SOFT	1
#define		CI_IACTIVE_PROCESSING_HARD	2
	volatile int ci_ipending;	/* pending interrupt bits */

	int ci_intrdepth;		/* interrupt nesting depth (CLKF_INTR) */
	char *ci_intstk;		/* interrupt stack */
#define CPUSAVE_LEN	8
	register_t ci_tempsave[CPUSAVE_LEN];	/* scratch save area for trap entry */
	register_t ci_ddbsave[CPUSAVE_LEN];	/* scratch save area for ddb entry */
#define DISISAVE_LEN	4
	register_t ci_disisave[DISISAVE_LEN];	/* scratch save area; presumably for DSI/ISI faults — used by locore */

	/* timebase bookkeeping for the per-CPU clock/statclock code */
	volatile u_int64_t ci_nexttimerevent;
	volatile u_int64_t ci_prevtb;
	volatile u_int64_t ci_lasttb;
	volatile u_int64_t ci_nextstatevent;
	int ci_statspending;

	volatile int    ci_ddb_paused;	/* ddb MP pause handshake, states below */
#define	CI_DDB_RUNNING	0
#define	CI_DDB_SHOULDSTOP	1
#define	CI_DDB_STOPPED		2
#define	CI_DDB_ENTERDDB		3
#define	CI_DDB_INDDB		4

	u_int32_t ci_randseed;		/* per-CPU PRNG seed */

#ifdef DIAGNOSTIC
	int	ci_mutex_level;		/* mutex nesting level (DIAGNOSTIC only) */
#endif
};
89 
/*
 * Return the cpu_info for the processor we are running on.
 * The pointer is kept in SPRG0 by the MD startup code.
 */
static __inline struct cpu_info *
curcpu(void)
{
	struct cpu_info *cpu;

	__asm volatile ("mfsprg %0,0" : "=r"(cpu));
	return cpu;
}
98 
/* Shorthands for the current CPU's pcb and pmap. */
#define	curpcb			(curcpu()->ci_curpcb)
#define	curpm			(curcpu()->ci_curpm)

/* Device unit number of a CPU, or 0 if its device is not attached yet. */
#define CPU_INFO_UNIT(ci)	((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)

#ifdef MULTIPROCESSOR

#define PPC_MAXPROCS		4
107 
108 static __inline int
109 cpu_number(void)
110 {
111 	int pir;
112 
113 	pir = curcpu()->ci_cpuid;
114 	return pir;
115 }
116 
void	cpu_boot_secondary_processors(void);

/* The primary (boot) processor is the one with id 0. */
#define CPU_IS_PRIMARY(ci)	((ci)->ci_cpuid == 0)
#define CPU_INFO_ITERATOR		int
/* Walk the first ncpus entries of the cpu_info[] array. */
#define CPU_INFO_FOREACH(cii, ci)					\
	for (cii = 0, ci = &cpu_info[0]; cii < ncpus; cii++, ci++)

void cpu_unidle(struct cpu_info *);

#else

#define PPC_MAXPROCS		1

#define cpu_number()		0

#define CPU_IS_PRIMARY(ci)	1
#define CPU_INFO_ITERATOR		int
/* Uniprocessor: run the loop body exactly once with ci = curcpu(). */
#define CPU_INFO_FOREACH(cii, ci)					\
	for (cii = 0, ci = curcpu(); ci != NULL; ci = NULL)

#define cpu_unidle(ci)

#endif

#define MAXCPUS	PPC_MAXPROCS

extern struct cpu_info cpu_info[PPC_MAXPROCS];
144 
/*
 * Clock frame accessors: whether the clock interrupt came from user
 * mode (PSL_PR set in the saved MSR), the interrupted PC, and whether
 * we interrupted another interrupt handler.
 */
#define	CLKF_USERMODE(frame)	(((frame)->srr1 & PSL_PR) != 0)
#define	CLKF_PC(frame)		((frame)->srr0)
#define	CLKF_INTR(frame)	((frame)->depth != 0)

/*
 * This is used during profiling to integrate system time.
 */
#define	PROC_PC(p)		(trapframe(p)->srr0)

void	delay(unsigned);
#define	DELAY(n)		delay(n)

/* Post an AST so the process traps back into the kernel soon. */
#define	aston(p)		((p)->p_md.md_astpending = 1)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
#define	need_resched(ci) \
do {									\
	ci->ci_want_resched = 1;					\
	if (ci->ci_curproc != NULL)					\
		aston(ci->ci_curproc);					\
} while (0)
#define clear_resched(ci) (ci)->ci_want_resched = 0

#define	need_proftick(p)	aston(p)
172 
void	signotify(struct proc *);

extern char *bootpath;		/* boot device path; set elsewhere during bootstrap */

#ifndef	CACHELINESIZE
#define	CACHELINESIZE	32			/* For now		XXX */
#endif
180 
/*
 * Make the instruction cache coherent with recently written code:
 * write back the data cache over the range, then invalidate the
 * corresponding instruction cache lines.  `from' need not be
 * cache-line aligned; `len' is widened to cover the partial first
 * line (dcbst/icbi operate on the whole line containing the EA).
 */
static __inline void
syncicache(void *from, int len)
{
	int l;
	char *p = from;

	/* Widen len by the offset into the first cache line. */
	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
	l = len;

	/* Write back dirty data cache lines covering [from, from+len). */
	do {
		__asm __volatile ("dcbst 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm __volatile ("sync");	/* order dcbst before the icbi pass */
	p = from;
	l = len;
	/* Invalidate the instruction cache lines for the same range. */
	do {
		__asm __volatile ("icbi 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm __volatile ("isync");	/* discard any prefetched stale instructions */
}
203 
204 static __inline void
205 invdcache(void *from, int len)
206 {
207 	int l;
208 	char *p = from;
209 
210 	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
211 	l = len;
212 
213 	do {
214 		__asm __volatile ("dcbi 0,%0" :: "r"(p));
215 		p += CACHELINESIZE;
216 	} while ((l -= CACHELINESIZE) > 0);
217 	__asm __volatile ("sync");
218 }
219 
/*
 * Generate a pair of accessors for special purpose register `n':
 * ppc_mf<name>() reads it, ppc_mt<name>(val) writes it.  The SPR
 * number must be a literal since it is pasted into the asm string.
 */
#define FUNC_SPR(n, name) \
static __inline u_int32_t ppc_mf ## name (void)			\
{								\
	u_int32_t ret;						\
	__asm __volatile ("mfspr %0," # n : "=r" (ret));	\
	return ret;						\
}								\
static __inline void ppc_mt ## name (u_int32_t val)		\
{								\
	__asm __volatile ("mtspr "# n ",%0" :: "r" (val));	\
}								\

231 
/* General and exception-handling SPRs. */
FUNC_SPR(0, mq)
FUNC_SPR(1, xer)
FUNC_SPR(4, rtcu)
FUNC_SPR(5, rtcl)
FUNC_SPR(8, lr)
FUNC_SPR(9, ctr)
FUNC_SPR(18, dsisr)
FUNC_SPR(19, dar)
FUNC_SPR(22, dec)
FUNC_SPR(25, sdr1)
FUNC_SPR(26, srr0)
FUNC_SPR(27, srr1)
FUNC_SPR(256, vrsave)
/* SPRG0-3: scratch registers (SPRG0 holds the cpu_info pointer, see curcpu). */
FUNC_SPR(272, sprg0)
FUNC_SPR(273, sprg1)
FUNC_SPR(274, sprg2)
FUNC_SPR(275, sprg3)
FUNC_SPR(280, asr)
FUNC_SPR(282, ear)
FUNC_SPR(287, pvr)
/* Instruction BAT (block address translation) registers. */
FUNC_SPR(528, ibat0u)
FUNC_SPR(529, ibat0l)
FUNC_SPR(530, ibat1u)
FUNC_SPR(531, ibat1l)
FUNC_SPR(532, ibat2u)
FUNC_SPR(533, ibat2l)
FUNC_SPR(534, ibat3u)
FUNC_SPR(535, ibat3l)
FUNC_SPR(560, ibat4u)
FUNC_SPR(561, ibat4l)
FUNC_SPR(562, ibat5u)
FUNC_SPR(563, ibat5l)
FUNC_SPR(564, ibat6u)
FUNC_SPR(565, ibat6l)
FUNC_SPR(566, ibat7u)
FUNC_SPR(567, ibat7l)
/* Data BAT registers. */
FUNC_SPR(536, dbat0u)
FUNC_SPR(537, dbat0l)
FUNC_SPR(538, dbat1u)
FUNC_SPR(539, dbat1l)
FUNC_SPR(540, dbat2u)
FUNC_SPR(541, dbat2l)
FUNC_SPR(542, dbat3u)
FUNC_SPR(543, dbat3l)
FUNC_SPR(568, dbat4u)
FUNC_SPR(569, dbat4l)
FUNC_SPR(570, dbat5u)
FUNC_SPR(571, dbat5l)
FUNC_SPR(572, dbat6u)
FUNC_SPR(573, dbat6l)
FUNC_SPR(574, dbat7u)
FUNC_SPR(575, dbat7l)
/* Implementation-specific control/debug registers. */
FUNC_SPR(1008, hid0)
FUNC_SPR(1009, hid1)
FUNC_SPR(1010, iabr)
FUNC_SPR(1017, l2cr)
FUNC_SPR(1018, l3cr)
FUNC_SPR(1013, dabr)
FUNC_SPR(1023, pir)
291 
292 static __inline u_int32_t
293 ppc_mftbl (void)
294 {
295 	int ret;
296 	__asm __volatile ("mftb %0" : "=r" (ret));
297 	return ret;
298 }
299 
/*
 * Read the full 64-bit timebase on a 32-bit CPU.  TBU is read before
 * and after TBL; if the two TBU reads differ, TBL wrapped in between
 * and the sequence is retried.  "%0+1" names the second register of
 * the pair the compiler allocates for the 64-bit `tb' operand.
 */
static __inline u_int64_t
ppc_mftb(void)
{
	u_long scratch;
	u_int64_t tb;

	__asm __volatile ("1: mftbu %0; mftb %0+1; mftbu %1;"
	    " cmpw 0,%0,%1; bne 1b" : "=r"(tb), "=r"(scratch));
	return tb;
}
310 
311 static __inline u_int32_t
312 ppc_mfmsr (void)
313 {
314 	int ret;
315         __asm __volatile ("mfmsr %0" : "=r" (ret));
316 	return ret;
317 }
318 
319 static __inline void
320 ppc_mtmsr (u_int32_t val)
321 {
322         __asm __volatile ("mtmsr %0" :: "r" (val));
323 }
324 
/*
 * Write a segment register via mtsrin.  The segment number selects
 * the register through the upper bits of `sn_shifted' — the caller
 * passes it pre-shifted into position (hence the name).
 */
static __inline void
ppc_mtsrin(u_int32_t val, u_int32_t sn_shifted)
{
	__asm __volatile ("mtsrin %0,%1" :: "r"(val), "r"(sn_shifted));
}
330 
/* SCOM register accessors, implemented out of line; presumably 970-specific — see MD code. */
u_int64_t ppc64_mfscomc(void);
void ppc_mtscomc(u_int32_t);
void ppc64_mtscomc(u_int64_t);
u_int64_t ppc64_mfscomd(void);
void ppc_mtscomd(u_int32_t);
336 
337 #include <machine/psl.h>
338 
339 /*
340  * General functions to enable and disable interrupts
341  * without having inlined assembly code in many functions.
342  */
343 static __inline void
344 ppc_intr_enable(int enable)
345 {
346 	u_int32_t msr;
347 	if (enable != 0) {
348 		msr = ppc_mfmsr();
349 		msr |= PSL_EE;
350 		ppc_mtmsr(msr);
351 	}
352 }
353 
354 static __inline int
355 ppc_intr_disable(void)
356 {
357 	u_int32_t emsr, dmsr;
358 	emsr = ppc_mfmsr();
359 	dmsr = emsr & ~PSL_EE;
360 	ppc_mtmsr(dmsr);
361 	return (emsr & PSL_EE);
362 }
363 
int ppc_cpuspeed(int *);
void ppc_check_procid(void);
extern int ppc_proc_is_64b;	/* nonzero when running on a 64-bit-capable CPU */

/*
 * PowerPC CPU types
 * (version values matched against the PVR — see ppc_mfpvr())
 */
#define	PPC_CPU_MPC601		1
#define	PPC_CPU_MPC603		3
#define	PPC_CPU_MPC604		4
#define	PPC_CPU_MPC603e		6
#define	PPC_CPU_MPC603ev	7
#define	PPC_CPU_MPC750		8
#define	PPC_CPU_MPC604ev	9
#define	PPC_CPU_MPC7400		12
#define	PPC_CPU_IBM970FX	0x003c
#define	PPC_CPU_IBM970MP	0x0044
#define	PPC_CPU_IBM750FX	0x7000
#define	PPC_CPU_MPC7410		0x800c
#define	PPC_CPU_MPC7447A	0x8003
#define	PPC_CPU_MPC7448		0x8004
#define	PPC_CPU_MPC7450		0x8000
#define	PPC_CPU_MPC7455		0x8001
#define	PPC_CPU_MPC7457	0x8002

/*
 * This needs to be included late since it relies on definitions higher
 * up in this file.
 */
#if defined(MULTIPROCESSOR) && defined(_KERNEL)
#include <sys/mplock.h>
#endif
396 
397 #endif	/* _POWERPC_CPU_H_ */
398