xref: /netbsd/sys/arch/hppa/include/cpufunc.h (revision c4a72b64)
1 /*	$NetBSD: cpufunc.h,v 1.2 2002/08/19 18:58:29 fredette Exp $	*/
2 
3 /*	$OpenBSD: cpufunc.h,v 1.17 2000/05/15 17:22:40 mickey Exp $	*/
4 
5 /*
6  * Copyright (c) 1998,2000 Michael Shalayeff
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by Michael Shalayeff.
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*
35  *  (c) Copyright 1988 HEWLETT-PACKARD COMPANY
36  *
37  *  To anyone who acknowledges that this file is provided "AS IS"
38  *  without any express or implied warranty:
39  *      permission to use, copy, modify, and distribute this file
40  *  for any purpose is hereby granted without fee, provided that
41  *  the above copyright notice and this notice appears in all
42  *  copies, and that the name of Hewlett-Packard Company not be
43  *  used in advertising or publicity pertaining to distribution
44  *  of the software without specific, written prior permission.
45  *  Hewlett-Packard Company makes no representations about the
46  *  suitability of this software for any purpose.
47  */
48 /*
49  * Copyright (c) 1990,1994 The University of Utah and
50  * the Computer Systems Laboratory (CSL).  All rights reserved.
51  *
52  * THE UNIVERSITY OF UTAH AND CSL PROVIDE THIS SOFTWARE IN ITS "AS IS"
53  * CONDITION, AND DISCLAIM ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
54  * WHATSOEVER RESULTING FROM ITS USE.
55  *
56  * CSL requests users of this software to return to csl-dist@cs.utah.edu any
57  * improvements that they make and grant CSL redistribution rights.
58  *
59  * 	Utah $Hdr: c_support.s 1.8 94/12/14$
60  *	Author: Bob Wheeler, University of Utah CSL
61  */
62 
63 #ifndef _HPPA_CPUFUNC_H_
64 #define _HPPA_CPUFUNC_H_
65 
66 #include <machine/psl.h>
67 #include <machine/pte.h>
68 
/*
 * Convert between a byte address and the page-frame form used by the
 * TLB insert instructions: tlbbtop drops the low (PGSHIFT - 5) bits,
 * tlbptob restores them.
 * NOTE(review): the "- 5" presumably aligns the page number with the
 * operand field expected by iitlba/idtlba -- confirm against the
 * PA-RISC architecture manual.
 */
#define tlbbtop(b) ((b) >> (PGSHIFT - 5))
#define tlbptob(p) ((p) << (PGSHIFT - 5))

/*
 * Drop the low 17 bits of a byte address; from the name, this yields
 * an index/tag for the HPT (hash page table) -- verify against the
 * HPT layout in pmap code.
 */
#define hptbtop(b) ((b) >> 17)
/*
 * Get space register for an address.
 *
 * Executes the LDSID instruction, which returns the space identifier
 * associated with the address p, without any memory access.
 */
static __inline register_t ldsid(vaddr_t p) {
	register_t ret;
	__asm __volatile("ldsid (%1),%0" : "=r" (ret) : "r" (p));
	return ret;
}
80 
/*
 * Move to / move from a control register.  The register number r must
 * be a compile-time constant (the "i" constraint encodes it directly
 * into the instruction).
 */
#define mtctl(v,r) __asm __volatile("mtctl %0,%1":: "r" (v), "i" (r))
#define mfctl(r,v) __asm __volatile("mfctl %1,%0": "=r" (v): "i" (r))

/*
 * Move to / move from a space register; r must likewise be a
 * compile-time constant space-register number.
 */
#define mtsp(v,r) __asm __volatile("mtsp %0,%1":: "r" (v), "i" (r))
#define mfsp(r,v) __asm __volatile("mfsp %1,%0": "=r" (v): "i" (r))

/*
 * Set (ssm) or reset (rsm) the system-mask bits given by the constant
 * v; the previous system mask value is stored into r.
 */
#define ssm(v,r) __asm __volatile("ssm %1,%0": "=r" (r): "i" (v))
#define rsm(v,r) __asm __volatile("rsm %1,%0": "=r" (r): "i" (v))
89 
/*
 * Move to system mask.  Old value of system mask is returned.
 *
 * "ssm 0,%0" reads the current system mask without modifying it
 * (set-system-mask with a zero immediate), then "mtsm" installs the
 * new mask.  The "=&r" early-clobber is required because ret is
 * written before the mask input is consumed.
 */
static __inline register_t mtsm(register_t mask) {
	register_t ret;
	__asm __volatile("ssm 0,%0\n\t"
			 "mtsm %1": "=&r" (ret) : "r" (mask));
	return ret;
}
/*
 * Read the processor status word.
 *
 * Issues a BREAK with the kernel break codes (HPPA_BREAK_KERNEL /
 * HPPA_BREAK_GET_PSW); the kernel's break handler returns the PSW in
 * %ret0 (r28), which is copied to ret.  r28 is listed as clobbered
 * because the handler writes it.
 */
static __inline register_t get_psw(void)
{
	register_t ret;
	__asm __volatile("break %1, %2\n\tcopy %%ret0, %0" : "=r" (ret)
		: "i" (HPPA_BREAK_KERNEL), "i" (HPPA_BREAK_GET_PSW)
		: "r28");
	return ret;
}
106 
/*
 * Set the processor status word; returns the previous PSW.
 *
 * The new PSW is passed in %arg0 (r26) to a BREAK with codes
 * HPPA_BREAK_KERNEL / HPPA_BREAK_SET_PSW; the kernel break handler
 * performs the switch and leaves the old PSW in %ret0 (r28).  The
 * "0" constraint ties the psw input to the same register as ret, so
 * the initial copy sources it correctly.  r26 and r28 are clobbered
 * by the handler.
 */
static __inline register_t set_psw(register_t psw)
{
	register_t ret;
	__asm __volatile("copy	%0, %%arg0\n\tbreak %1, %2\n\tcopy %%ret0, %0"
		: "=r" (ret)
		: "i" (HPPA_BREAK_KERNEL), "i" (HPPA_BREAK_SET_PSW), "0" (psw)
		: "r26", "r28");
	return ret;
}
116 
117 
/*
 * Flush a single data (fdce) or instruction (fice) cache entry at
 * offset off in space sp.  sp must be a compile-time constant
 * space-register number ("i" constraint).
 */
#define	fdce(sp,off) __asm __volatile("fdce 0(%0,%1)":: "i" (sp), "r" (off))
#define	fice(sp,off) __asm __volatile("fice 0(%0,%1)":: "i" (sp), "r" (off))
/*
 * SYNC followed by seven NOPs; presumably the NOPs satisfy the
 * architectural delay required after SYNC before dependent
 * instructions execute -- confirm against the PA-RISC manual.
 */
#define sync_caches() \
    __asm __volatile("sync\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop")
122 
/*
 * Insert an instruction-TLB address translation: load the space into
 * %sr1, then execute IITLBA with page frame pg for virtual address va.
 */
static __inline void
iitlba(u_int pg, pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("iitlba %0,(%%sr1, %1)":: "r" (pg), "r" (va));
}
129 
/*
 * Insert a data-TLB address translation: load the space into %sr1,
 * then execute IDTLBA with page frame pg for virtual address va.
 */
static __inline void
idtlba(u_int pg, pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("idtlba %0,(%%sr1, %1)":: "r" (pg), "r" (va));
}
136 
/*
 * Insert instruction-TLB protection bits: load the space into %sr1,
 * then execute IITLBP with protection word prot for virtual address va.
 */
static __inline void
iitlbp(u_int prot, pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("iitlbp %0,(%%sr1, %1)":: "r" (prot), "r" (va));
}
143 
/*
 * Insert data-TLB protection bits: load the space into %sr1, then
 * execute IDTLBP with protection word prot for virtual address va.
 */
static __inline void
idtlbp(u_int prot, pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("idtlbp %0,(%%sr1, %1)":: "r" (prot), "r" (va));
}
150 
/*
 * Purge the instruction-TLB entry for (sp, va): load the space into
 * %sr1, then execute PITLB.
 */
static __inline void
pitlb(pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("pitlb %%r0(%%sr1, %0)":: "r" (va));
}
157 
/*
 * Purge the data-TLB entry for (sp, va): load the space into %sr1,
 * then execute PDTLB.
 */
static __inline void
pdtlb(pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("pdtlb %%r0(%%sr1, %0)":: "r" (va));
}
164 
/*
 * Purge-instruction-TLB-entry variant (PITLBE) for (sp, va); loads
 * the space into %sr1 first.
 */
static __inline void
pitlbe(pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("pitlbe %%r0(%%sr1, %0)":: "r" (va));
}
171 
/*
 * Purge-data-TLB-entry variant (PDTLBE) for (sp, va); loads the
 * space into %sr1 first.
 */
static __inline void
pdtlbe(pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("pdtlbe %%r0(%%sr1, %0)":: "r" (va));
}
178 
#ifdef _KERNEL
/*
 * Ranged cache flush/purge and whole-cache / whole-TLB operations,
 * plus HPA lookup; defined elsewhere in the kernel.
 */
void ficache __P((pa_space_t sp, vaddr_t va, vsize_t size));
void fdcache __P((pa_space_t sp, vaddr_t va, vsize_t size));
void pdcache __P((pa_space_t sp, vaddr_t va, vsize_t size));
void fcacheall __P((void));
void ptlball __P((void));
hppa_hpa_t cpu_gethpa __P((int n));
186 
187 /*
188  * These flush or purge the data cache for a item whose total
189  * size is <= the size of a data cache line, however they don't
190  * check this constraint.
191  */
/*
 * Flush the data cache for a small object at (sp, va) of the given
 * size: one FDC for the line holding the first byte and one for the
 * line holding the last byte (va + size - 1), then SYNC and SYNCDMA
 * to wait for completion.  Caller must ensure size fits in one cache
 * line (see the comment above); size must be >= 1.
 */
static __inline void
fdcache_small(pa_space_t sp, vaddr_t va, vsize_t size)
{
	__asm volatile(
		"	mtsp	%0,%%sr1		\n"
		"	fdc	%%r0(%%sr1, %1)		\n"
		"	fdc	%2(%%sr1, %1)		\n"
		"	sync				\n"
		"	syncdma				\n"
		:
		: "r" (sp), "r" (va), "r" (size - 1));
}
/*
 * Purge (discard without writeback) the data cache for a small object
 * at (sp, va): one PDC for the line holding the first byte and one
 * for the line holding the last byte (va + size - 1), then SYNC and
 * SYNCDMA.  Same single-cache-line constraint as fdcache_small;
 * size must be >= 1.
 */
static __inline void
pdcache_small(pa_space_t sp, vaddr_t va, vsize_t size)
{
	__asm volatile(
		"	mtsp	%0,%%sr1		\n"
		"	pdc	%%r0(%%sr1, %1)		\n"
		"	pdc	%2(%%sr1, %1)		\n"
		"	sync				\n"
		"	syncdma				\n"
		:
		: "r" (sp), "r" (va), "r" (size - 1));
}
216 
217 #endif /* _KERNEL */
218 
219 #endif /* _HPPA_CPUFUNC_H_ */
220