1 /* $OpenBSD: psl.h,v 1.37 2024/11/06 07:11:14 miod Exp $ */
2 /* $NetBSD: psl.h,v 1.20 2001/04/13 23:30:05 thorpej Exp $ */
3
4 /*
5 * Copyright (c) 1992, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This software was developed by the Computer Systems Engineering group
9 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
10 * contributed to Berkeley.
11 *
12 * All advertising materials mentioning features or use of this software
13 * must display the following acknowledgement:
14 * This product includes software developed by the University of
15 * California, Lawrence Berkeley Laboratory.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. Neither the name of the University nor the names of its contributors
26 * may be used to endorse or promote products derived from this software
27 * without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * @(#)psl.h 8.1 (Berkeley) 6/11/93
42 */
43
44 #ifndef _SPARC64_PSL_
45 #define _SPARC64_PSL_
46
/*
 * Interesting spl()s: the interrupt priority levels (%pil values) that
 * the spl*() functions raise the processor to.  Higher number blocks
 * more interrupt sources.
 */
#define PIL_SCSI	3
#define PIL_BIO		5
#define PIL_VIDEO	5
#define PIL_TTY		6
#define PIL_NET		6
#define PIL_VM		7
#define	PIL_AUD		8
#define PIL_CLOCK	10
#define PIL_FD		11
#define PIL_SER		12
#define	PIL_STATCLOCK	14
#define PIL_HIGH	15	/* highest level: blocks everything */
#define PIL_SCHED	PIL_STATCLOCK
61
/*
 * SPARC V9 CCR register: the 32-bit integer condition codes (icc) live
 * in the low nibble; the 64-bit condition codes (xcc) are the same four
 * flags shifted up by XCC_SHIFT.
 */

#define ICC_C	0x01L	/* carry */
#define ICC_V	0x02L	/* overflow */
#define ICC_Z	0x04L	/* zero */
#define ICC_N	0x08L	/* negative */
#define XCC_SHIFT	4
#define XCC_C	(ICC_C<<XCC_SHIFT)
#define XCC_V	(ICC_V<<XCC_SHIFT)
#define XCC_Z	(ICC_Z<<XCC_SHIFT)
#define XCC_N	(ICC_N<<XCC_SHIFT)
75
76
/*
 * SPARC V9 PSTATE register (what replaces the PSR in V9)
 *
 * Here's the layout:
 *
 *    11   10    9     8    7  6    5    4     3     2    1    0
 * +------------------------------------------------------------+
 * | IG | MG | CLE | TLE | MM | RED | PEF | AM | PRIV | IE | AG |
 * +------------------------------------------------------------+
 */

#define PSTATE_IG	0x800	/* enable spitfire interrupt globals */
#define PSTATE_MG	0x400	/* enable spitfire MMU globals */
#define PSTATE_CLE	0x200	/* current little endian */
#define PSTATE_TLE	0x100	/* traps little endian */
#define PSTATE_MM	0x0c0	/* memory model (2-bit field) */
#define PSTATE_MM_TSO	0x000	/* total store order */
#define PSTATE_MM_PSO	0x040	/* partial store order */
#define PSTATE_MM_RMO	0x080	/* Relaxed memory order */
#define PSTATE_RED	0x020	/* RED state */
#define PSTATE_PEF	0x010	/* enable floating point */
#define PSTATE_AM	0x008	/* 32-bit address masking */
#define PSTATE_PRIV	0x004	/* privileged mode */
#define PSTATE_IE	0x002	/* interrupt enable */
#define PSTATE_AG	0x001	/* enable alternate globals */

/*
 * Bit names for the kernel's %b printf format: leading \20 selects a
 * hexadecimal value dump, then each octal byte is a 1-based bit number
 * followed by that bit's name.  The bare \10 covers the upper MM bit,
 * which shares the "MM" label with bit \7.
 */
#define PSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"
104
105
/*
 * We're running kernel code in TSO for the moment so we don't need to worry
 * about possible memory barrier bugs.
 * Userland code sets the memory model in the ELF header.
 *
 * Canned PSTATE values for the major processor contexts:
 */

#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)	/* privileged, TSO, interrupts off */
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_PRIV)	/* kernel with interrupts disabled */
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)		/* kernel with interrupts enabled */
#define PSTATE_USER	(PSTATE_MM_RMO|PSTATE_IE)	/* unprivileged, RMO, interrupts on */
116
117
/*
 * SPARC V9 TSTATE register: a snapshot of CCR, ASI, PSTATE and CWP
 * saved on trap entry.
 *
 *   39    32 31    24 23  18 17      8 7   5 4    0
 * +-------+--------+------+----------+-----+------+
 * |  CCR  |  ASI   |  -   |  PSTATE  |  -  | CWP  |
 * +-------+--------+------+----------+-----+------+
 */

#define TSTATE_CWP		0x01f
#define TSTATE_PSTATE		0x6ff00
#define TSTATE_PSTATE_SHIFT	8
#define TSTATE_ASI		0xff000000LL
#define TSTATE_ASI_SHIFT	24
#define TSTATE_CCR		0xff00000000LL
#define TSTATE_CCR_SHIFT	32

/* Leftover SPARC V8 PSTATE stuff: icc flags in their old PSR position. */
#define PSR_ICC 	0x00f00000
137
/*
 * These are here to simplify life: common PSTATE bits/values relocated
 * into their position within TSTATE.
 */
#define TSTATE_PEF	(PSTATE_PEF<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PRIV	(PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)

#define TSTATE_KERN	((PSTATE_KERN)<<TSTATE_PSTATE_SHIFT)
/*
 * SPARC V9 VER version register.
 *
 *   63    48 47    32 31   24 23  16 15     8 7  5 4      0
 * +--------+--------+--------+------+--------+----+--------+
 * | manuf  |  impl  |  mask  |  -   | maxtl  | -  | maxwin |
 * +--------+--------+--------+------+--------+----+--------+
 *
 */

#define VER_MANUF	0xffff000000000000ULL	/* manufacturer */
#define VER_MANUF_SHIFT	48
#define VER_IMPL	0x0000ffff00000000ULL	/* implementation (see IMPL_* below) */
#define VER_IMPL_SHIFT	32
#define VER_MASK	0x00000000ff000000ULL	/* mask set revision */
#define VER_MASK_SHIFT	24
#define VER_MAXTL	0x000000000000ff00ULL	/* maximum supported trap level */
#define VER_MAXTL_SHIFT	8
#define VER_MAXWIN	0x000000000000001fULL	/* index of last register window */
164
/* Known CPU implementation codes, as found in the VER.impl field. */
#define IMPL_SPARC64		0x01	/* SPARC64 */
#define IMPL_SPARC64_II		0x02	/* SPARC64-II */
#define IMPL_SPARC64_III	0x03	/* SPARC64-III */
#define IMPL_SPARC64_IV		0x04	/* SPARC64-IV */
#define IMPL_ZEUS		0x05	/* SPARC64-V */
#define IMPL_OLYMPUS_C		0x06	/* SPARC64-VI */
#define IMPL_JUPITER		0x07	/* SPARC64-VII */
#define IMPL_SPITFIRE		0x10	/* UltraSPARC */
#define IMPL_BLACKBIRD		0x11	/* UltraSPARC-II */
#define IMPL_SABRE		0x12	/* UltraSPARC-IIi */
#define IMPL_HUMMINGBIRD	0x13	/* UltraSPARC-IIe */
#define IMPL_CHEETAH		0x14	/* UltraSPARC-III */
#define IMPL_CHEETAH_PLUS	0x15	/* UltraSPARC-III+ */
#define IMPL_JALAPENO		0x16	/* UltraSPARC-IIIi */
#define IMPL_JAGUAR		0x18	/* UltraSPARC-IV */
#define IMPL_PANTHER		0x19	/* UltraSPARC-IV+ */
#define IMPL_SERRANO		0x22	/* UltraSPARC-IIIi+ */
182
/*
 * Here are a few things to help us transition between user and kernel mode:
 */

/* Memory models (PSTATE.MM values; see PSTATE_KERN/PSTATE_USER above). */
#define KERN_MM		PSTATE_MM_TSO
#define USER_MM		PSTATE_MM_RMO

/*
 * Register window handlers.  These point to generic routines that check the
 * stack pointer and then vector to the real handler.  We could optimize this
 * if we could guarantee only 32-bit or 64-bit stacks.
 */
#define WSTATE_KERN	027
#define WSTATE_USER	022

/* Mask for the 5-bit current window pointer field. */
#define CWP		0x01f

/* 64-byte alignment -- this seems the best place to put this. */
#define BLOCK_SIZE	64	/* bytes */
#define BLOCK_ALIGN	0x3f	/* BLOCK_SIZE - 1: mask of in-block offset bits */
204
205 #if defined(_KERNEL) && !defined(_LOCORE)
206
#ifdef DIAGNOSTIC
/*
 * Although this function is implemented in MI code, it must be in this MD
 * header because we don't want this header to include MI includes.
 */
void splassert_fail(int, int, const char *);
extern int splassert_ctl;	/* runtime switch: checking only when > 0 */
void splassert_check(int, const char *);
/*
 * Runtime IPL assertion: hand the wanted IPL and the calling function's
 * name to the MI checker.  Compiles to nothing unless DIAGNOSTIC.
 */
#define splassert(__wantipl) do {			\
	if (splassert_ctl > 0) {			\
		splassert_check(__wantipl, __func__);	\
	}						\
} while (0)
#define splsoftassert(wantipl) splassert(wantipl)
#else
#define splassert(wantipl)	do { /* nada */ } while (0)
#define splsoftassert(wantipl)	do { /* nada */ } while (0)
#endif
225
226 /*
227 * GCC pseudo-functions for manipulating privileged registers
228 */
/* Read the current value of the privileged PSTATE register. */
static inline u_int64_t getpstate(void);
static inline u_int64_t
getpstate(void)
{
	return (sparc_rdpr(pstate));
}
235
/* Write a new value (e.g. PSTATE_KERN, PSTATE_INTR) into PSTATE. */
static inline void setpstate(u_int64_t);
static inline void
setpstate(u_int64_t newpstate)
{
	sparc_wrpr(pstate, newpstate, 0);
}
242
/* Read the current window pointer (CWP) register. */
static inline int getcwp(void);
static inline int
getcwp(void)
{
	return (sparc_rdpr(cwp));
}
249
/* Set the current window pointer (CWP) register. */
static inline void setcwp(u_int64_t);
static inline void
setcwp(u_int64_t newcwp)
{
	sparc_wrpr(cwp, newcwp, 0);
}
256
/* Read the VER register; decode with the VER_* masks/shifts above. */
static inline u_int64_t getver(void);
static inline u_int64_t
getver(void)
{
	return (sparc_rdpr(ver));
}
263
264 static inline u_int64_t intr_disable(void);
265 static inline u_int64_t
intr_disable(void)266 intr_disable(void)
267 {
268 u_int64_t s;
269
270 s = sparc_rdpr(pstate);
271 sparc_wrpr(pstate, s & ~PSTATE_IE, 0);
272 return (s);
273 }
274
/* Restore a PSTATE value previously saved by intr_disable(). */
static inline void intr_restore(u_int64_t);
static inline void
intr_restore(u_int64_t s)
{
	sparc_wrpr(pstate, s, 0);
}
281
/*
 * Perform a stxa_nc() store of val to va in ASI asi, followed by a
 * "membar #Sync".  Interrupts are disabled around the pair so no trap
 * can slip in between the store and the barrier.
 */
static inline void stxa_sync(u_int64_t, u_int64_t, u_int64_t);
static inline void
stxa_sync(u_int64_t va, u_int64_t asi, u_int64_t val)
{
	u_int64_t s = intr_disable();
	stxa_nc(va, asi, val);
	__asm volatile("membar #Sync" : : : "memory");
	intr_restore(s);
}
291
/*
 * Set the processor interrupt level (%pil) to newipl and return the
 * previous level.  NOTE: the "I" constraint requires newipl to be a
 * compile-time constant; the trailing empty asm is a compiler-level
 * memory barrier so memory accesses are not moved across the IPL change.
 */
static inline int
_spl(int newipl)
{
	int oldpil;

	__asm volatile(	"	rdpr	%%pil, %0	\n"
			"	wrpr	%%g0, %1, %%pil	\n"
	    : "=&r" (oldpil)
	    : "I" (newipl)
	    : "%g0");
	__asm volatile("" : : : "memory");

	return (oldpil);
}
306
307 /* A non-priority-decreasing version of SPL */
308 static inline int
_splraise(int newpil)309 _splraise(int newpil)
310 {
311 int oldpil;
312
313 oldpil = sparc_rdpr(pil);
314 if (newpil > oldpil)
315 sparc_wrpr(pil, newpil, 0);
316 return (oldpil);
317 }
318
/* Unconditionally set %pil to newpil, raising or lowering the IPL. */
static inline void
_splx(int newpil)
{
	sparc_wrpr(pil, newpil, 0);
}
324
325 #endif /* KERNEL && !_LOCORE */
326
327 #endif /* _SPARC64_PSL_ */
328