/*	cpufunc.h,v 1.40.22.4 2007/11/08 10:59:33 matt Exp	*/

/*
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 */

#ifndef _ARM_CPUFUNC_H_
#define _ARM_CPUFUNC_H_

#ifdef _ARM_ARCH_7
/*
 * Options for DMB and DSB:
 *	oshld	Outer Shareable, load
 *	oshst	Outer Shareable, store
 *	osh	Outer Shareable, all
 *	nshld	Non-shareable, load
 *	nshst	Non-shareable, store
 *	nsh	Non-shareable, all
 *	ishld	Inner Shareable, load
 *	ishst	Inner Shareable, store
 *	ish	Inner Shareable, all
 *	ld	Full system, load
 *	st	Full system, store
 *	sy	Full system, all
 */
#define	dsb(opt)	__asm __volatile("dsb " __STRING(opt) : : : "memory")
#define	dmb(opt)	__asm __volatile("dmb " __STRING(opt) : : : "memory")
#define	isb()		__asm __volatile("isb" : : : "memory")
#define	sev()		__asm __volatile("sev" : : : "memory")

#else

#define dsb(opt)	\
	__asm __volatile("mcr p15, 0, %0, c7, c10, 4" :: "r" (0) : "memory")
#define dmb(opt)	\
	__asm __volatile("mcr p15, 0, %0, c7, c10, 5" :: "r" (0) : "memory")
#define isb()		\
	__asm __volatile("mcr p15, 0, %0, c7, c5, 4" :: "r" (0) : "memory")
#define sev()		__nothing

#endif
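
/*
 * Illustrative use of the barrier macros above (a minimal sketch, not part
 * of this header's API; the descriptor and doorbell names are hypothetical):
 * when the CPU publishes a descriptor in memory and then pokes a device
 * doorbell register, a dsb() is commonly placed between the two stores so
 * the device cannot observe the doorbell before the descriptor:
 *
 *	desc->addr = buf_pa;			// publish descriptor in memory
 *	dsb(sy);				// complete the store before the MMIO write
 *	bus_space_write_4(iot, ioh, doorbell_off, 1);
 *
 * dmb(ish) orders ordinary memory accesses between CPUs in the inner
 * shareable domain, and isb() flushes the pipeline, e.g. after changing
 * a system control register with an mcr.
 */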

#ifdef __arm__

#ifdef _KERNEL

#include <sys/types.h>

#include <arm/armreg.h>
#include <arm/cpuconf.h>
#include <arm/cpufunc_proto.h>

struct cpu_functions {

	/* CPU functions */

	u_int	(*cf_id)		(void);
	void	(*cf_cpwait)		(void);

	/* MMU functions */

	u_int	(*cf_control)		(u_int, u_int);
	void	(*cf_domains)		(u_int);
#if defined(ARM_MMU_EXTENDED)
	void	(*cf_setttb)		(u_int, tlb_asid_t);
#else
	void	(*cf_setttb)		(u_int, bool);
#endif
	u_int	(*cf_faultstatus)	(void);
	u_int	(*cf_faultaddress)	(void);

	/* TLB functions */

	void	(*cf_tlb_flushID)	(void);
	void	(*cf_tlb_flushID_SE)	(vaddr_t);
	void	(*cf_tlb_flushI)	(void);
	void	(*cf_tlb_flushI_SE)	(vaddr_t);
	void	(*cf_tlb_flushD)	(void);
	void	(*cf_tlb_flushD_SE)	(vaddr_t);

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_all		Synchronize I-cache
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	I-cache Synch (all or range):
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write-back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 */
	void	(*cf_icache_sync_all)	(void);
	void	(*cf_icache_sync_range)	(vaddr_t, vsize_t);

	void	(*cf_dcache_wbinv_all)	(void);
	void	(*cf_dcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*cf_dcache_inv_range)	(vaddr_t, vsize_t);
	void	(*cf_dcache_wb_range)	(vaddr_t, vsize_t);

	void	(*cf_sdcache_wbinv_range)(vaddr_t, paddr_t, psize_t);
	void	(*cf_sdcache_inv_range)	(vaddr_t, paddr_t, psize_t);
	void	(*cf_sdcache_wb_range)	(vaddr_t, paddr_t, psize_t);

	void	(*cf_idcache_wbinv_all)	(void);
	void	(*cf_idcache_wbinv_range)(vaddr_t, vsize_t);

	/* Other functions */

	void	(*cf_flush_prefetchbuf)	(void);
	void	(*cf_drain_writebuf)	(void);
	void	(*cf_flush_brnchtgt_C)	(void);
	void	(*cf_flush_brnchtgt_E)	(u_int);

	void	(*cf_sleep)		(int mode);

	/* Soft functions */

	int	(*cf_dataabt_fixup)	(void *);
	int	(*cf_prefetchabt_fixup)	(void *);

#if defined(ARM_MMU_EXTENDED)
	void	(*cf_context_switch)	(u_int, tlb_asid_t);
#else
	void	(*cf_context_switch)	(u_int);
#endif

	void	(*cf_setup)		(char *);
};

extern struct cpu_functions cpufuncs;
extern u_int cputype;

#define cpu_idnum()		cpufuncs.cf_id()

#define cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define cpu_domains(d)		cpufuncs.cf_domains(d)
#define cpu_setttb(t, f)	cpufuncs.cf_setttb(t, f)
#define cpu_faultstatus()	cpufuncs.cf_faultstatus()
#define cpu_faultaddress()	cpufuncs.cf_faultaddress()

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
#define	cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)
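
/*
 * Illustrative use of the TLB macros above (a minimal sketch, not part of
 * this header's API): after rewriting a page-table entry whose old
 * translation for virtual address "va" may still be cached in the TLB,
 * the stale entry is removed from both the instruction and data streams
 * with a single-entry flush:
 *
 *	cpu_tlb_flushID_SE(va);
 *
 * A wholesale change that invalidates many translations at once would use
 * cpu_tlb_flushID() instead.
 */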

#define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_sdcache_wbinv_range(a, b, s) cpufuncs.cf_sdcache_wbinv_range((a), (b), (s))
#define	cpu_sdcache_inv_range(a, b, s) cpufuncs.cf_sdcache_inv_range((a), (b), (s))
#define	cpu_sdcache_wb_range(a, b, s) cpufuncs.cf_sdcache_wb_range((a), (b), (s))

#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
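
/*
 * Illustrative use of the cache macros above (a minimal sketch, not part of
 * this header's API): after the kernel stores newly generated instructions
 * at virtual address "va" (e.g. when loading a module or patching code),
 * the data must be written back and the I-cache synchronized before those
 * instructions are executed:
 *
 *	cpu_dcache_wb_range(va, size);		// push the stores toward memory
 *	cpu_icache_sync_range(va, size);	// discard stale instructions
 *
 * Per the rules documented in struct cpu_functions, an implementation that
 * cannot operate on just a range may fall back to the whole-cache variants.
 */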

#define	cpu_flush_prefetchbuf()	cpufuncs.cf_flush_prefetchbuf()
#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#define	cpu_flush_brnchtgt_C()	cpufuncs.cf_flush_brnchtgt_C()
#define	cpu_flush_brnchtgt_E(e)	cpufuncs.cf_flush_brnchtgt_E(e)

#define cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define cpu_dataabt_fixup(a)		cpufuncs.cf_dataabt_fixup(a)
#define cpu_prefetchabt_fixup(a)	cpufuncs.cf_prefetchabt_fixup(a)
#define ABORT_FIXUP_OK		0	/* fixup succeeded */
#define ABORT_FIXUP_FAILED	1	/* fixup failed */
#define ABORT_FIXUP_RETURN	2	/* abort handler should return */

#define cpu_context_switch(a)		cpufuncs.cf_context_switch(a)
#define cpu_setup(a)			cpufuncs.cf_setup(a)

int	set_cpufuncs		(void);
int	set_cpufuncs_id		(u_int);
#define ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define ARCHITECTURE_NOT_SUPPORTED	2	/* not known */

void	cpufunc_nullop		(void);
int	cpufunc_null_fixup	(void *);
int	early_abort_fixup	(void *);
int	late_abort_fixup	(void *);
u_int	cpufunc_id		(void);
u_int	cpufunc_control		(u_int, u_int);
void	cpufunc_domains		(u_int);
u_int	cpufunc_faultstatus	(void);
u_int	cpufunc_faultaddress	(void);

#if defined(CPU_XSCALE)
#define	cpu_cpwait()		cpufuncs.cf_cpwait()
#endif

#ifndef cpu_cpwait
#define	cpu_cpwait()
#endif

/*
 * Macros for manipulating CPU interrupts
 */
static __inline uint32_t __set_cpsr_c(uint32_t bic, uint32_t eor) __attribute__((__unused__));
static __inline uint32_t disable_interrupts(uint32_t mask) __attribute__((__unused__));
static __inline uint32_t enable_interrupts(uint32_t mask) __attribute__((__unused__));

static __inline uint32_t
__set_cpsr_c(uint32_t bic, uint32_t eor)
{
	uint32_t	tmp, ret;

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"bic	 %1, %0, %2\n"	/* Clear bits */
		"eor	 %1, %1, %3\n"	/* XOR bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor) : "memory");

	return ret;
}

static __inline uint32_t
disable_interrupts(uint32_t mask)
{
	uint32_t	tmp, ret;
	mask &= (I32_bit | F32_bit);

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"orr	 %1, %0, %2\n"	/* set bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (mask)
	: "memory");

	return ret;
}

static __inline uint32_t
enable_interrupts(uint32_t mask)
{
	uint32_t	ret;
	mask &= (I32_bit | F32_bit);

	/* Get the CPSR */
	__asm __volatile("mrs\t%0, cpsr\n" : "=r"(ret));
#ifdef _ARM_ARCH_6
	if (__builtin_constant_p(mask)) {
		switch (mask) {
		case I32_bit | F32_bit:
			__asm __volatile("cpsie\tif");
			break;
		case I32_bit:
			__asm __volatile("cpsie\ti");
			break;
		case F32_bit:
			__asm __volatile("cpsie\tf");
			break;
		default:
			break;
		}
		return ret;
	}
#endif /* _ARM_ARCH_6 */

	/* Set the control field of CPSR */
	__asm volatile("msr\tcpsr_c, %0" :: "r"(ret & ~mask));

	return ret;
}

#define restore_interrupts(old_cpsr)					\
	(__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
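
/*
 * Illustrative use of the interrupt routines above (a minimal sketch, not
 * part of this header's API): a short critical section saves the current
 * CPSR, masks IRQs, and then restores the previous I/F mask state:
 *
 *	uint32_t s = disable_interrupts(I32_bit);	// returns the old CPSR
 *	...critical section...
 *	restore_interrupts(s);				// restore prior I/F bits
 *
 * enable_interrupts(I32_bit) unconditionally unmasks IRQs, so the
 * save/restore pair is preferred when the caller may already be running
 * with interrupts disabled.
 */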

#define	ENABLE_INTERRUPT()		cpsie(I32_bit)
#define	DISABLE_INTERRUPT()		cpsid(I32_bit)
#define	DISABLE_INTERRUPT_SAVE()	cpsid(I32_bit)

static inline void cpsie(register_t psw) __attribute__((__unused__));
static inline register_t cpsid(register_t psw) __attribute__((__unused__));

static inline void
cpsie(register_t psw)
{
#ifdef _ARM_ARCH_6
	if (!__builtin_constant_p(psw)) {
		enable_interrupts(psw);
		return;
	}
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsie\ti"); break;
	case F32_bit:		__asm("cpsie\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsie\tif"); break;
	}
#else
	enable_interrupts(psw);
#endif
}

static inline register_t
cpsid(register_t psw)
{
#ifdef _ARM_ARCH_6
	register_t oldpsw;
	if (!__builtin_constant_p(psw))
		return disable_interrupts(psw);

	__asm("mrs	%0, cpsr" : "=r"(oldpsw));
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsid\ti"); break;
	case F32_bit:		__asm("cpsid\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsid\tif"); break;
	}
	return oldpsw;
#else
	return disable_interrupts(psw);
#endif
}


/* Functions to manipulate the CPSR. */
u_int	SetCPSR(u_int, u_int);
u_int	GetCPSR(void);


/*
 * CPU functions from locore.S
 */

void cpu_reset		(void) __dead;

#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
extern u_int arm_cache_prefer_mask;
#endif
extern u_int arm_dcache_align;
extern u_int arm_dcache_align_mask;

extern struct arm_cache_info arm_pcache;
extern struct arm_cache_info arm_scache;

extern uint32_t cpu_ttb;

#endif	/* _KERNEL */

#if defined(_KERNEL) || defined(_KMEMUSER)
/*
 * Miscellany
 */

int get_pc_str_offset	(void);

bool cpu_gtmr_exists_p(void);
u_int cpu_clusterid(void);
bool cpu_earlydevice_va_p(void);

/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */

void set_stackptr	(u_int, u_int);
u_int get_stackptr	(u_int);

#endif /* _KERNEL || _KMEMUSER */

#elif defined(__aarch64__)

#include <aarch64/cpufunc.h>

#endif /* __arm__/__aarch64__ */

#endif	/* _ARM_CPUFUNC_H_ */

/* End of cpufunc.h */