xref: /openbsd/sys/arch/sparc64/include/psl.h (revision 4fb9ab68)
1 /*	$OpenBSD: psl.h,v 1.36 2024/06/26 01:40:49 jsg Exp $	*/
2 /*	$NetBSD: psl.h,v 1.20 2001/04/13 23:30:05 thorpej Exp $ */
3 
4 /*
5  * Copyright (c) 1992, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This software was developed by the Computer Systems Engineering group
9  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
10  * contributed to Berkeley.
11  *
12  * All advertising materials mentioning features or use of this software
13  * must display the following acknowledgement:
14  *	This product includes software developed by the University of
15  *	California, Lawrence Berkeley Laboratory.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in the
24  *    documentation and/or other materials provided with the distribution.
25  * 3. Neither the name of the University nor the names of its contributors
26  *    may be used to endorse or promote products derived from this software
27  *    without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  *	@(#)psl.h	8.1 (Berkeley) 6/11/93
42  */
43 
#ifndef _SPARC64_PSL_
#define _SPARC64_PSL_

/*
 * Interesting spl()s: interrupt priority levels used by the spl(9)
 * machinery.  These values are written to the %pil register (see
 * _spl()/_splraise() below); a larger value masks more sources.
 */
#define PIL_SCSI	3
#define PIL_BIO		5
#define PIL_VIDEO	5
#define PIL_TTY		6
#define PIL_NET		6
#define PIL_VM		7
#define	PIL_AUD		8
#define PIL_CLOCK	10
#define PIL_FD		11
#define PIL_SER		12
#define PIL_STATCLOCK	14
#define PIL_HIGH	15
#define PIL_SCHED	PIL_STATCLOCK
61 
62 /*
63  * SPARC V9 CCR register
64  */
65 
66 #define ICC_C	0x01L
67 #define ICC_V	0x02L
68 #define ICC_Z	0x04L
69 #define ICC_N	0x08L
70 #define XCC_SHIFT	4
71 #define XCC_C	(ICC_C<<XCC_SHIFT)
72 #define XCC_V	(ICC_V<<XCC_SHIFT)
73 #define XCC_Z	(ICC_Z<<XCC_SHIFT)
74 #define XCC_N	(ICC_N<<XCC_SHIFT)
75 
76 
77 /*
78  * SPARC V9 PSTATE register (what replaces the PSR in V9)
79  *
80  * Here's the layout:
81  *
82  *    11   10    9     8   7  6   5     4     3     2     1   0
83  *  +------------------------------------------------------------+
84  *  | IG | MG | CLE | TLE | MM | RED | PEF | AM | PRIV | IE | AG |
85  *  +------------------------------------------------------------+
86  */
87 
88 #define PSTATE_IG	0x800	/* enable spitfire interrupt globals */
89 #define PSTATE_MG	0x400	/* enable spitfire MMU globals */
90 #define PSTATE_CLE	0x200	/* current little endian */
91 #define PSTATE_TLE	0x100	/* traps little endian */
92 #define PSTATE_MM	0x0c0	/* memory model */
93 #define PSTATE_MM_TSO	0x000	/* total store order */
94 #define PSTATE_MM_PSO	0x040	/* partial store order */
95 #define PSTATE_MM_RMO	0x080	/* Relaxed memory order */
96 #define PSTATE_RED	0x020	/* RED state */
97 #define PSTATE_PEF	0x010	/* enable floating point */
98 #define PSTATE_AM	0x008	/* 32-bit address masking */
99 #define PSTATE_PRIV	0x004	/* privileged mode */
100 #define PSTATE_IE	0x002	/* interrupt enable */
101 #define PSTATE_AG	0x001	/* enable alternate globals */
102 
103 #define PSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"
104 
105 
106 /*
107  * 32-bit code requires TSO or at best PSO since that's what's supported on
108  * SPARC V8 and earlier machines.
109  *
110  * 64-bit code sets the memory model in the ELF header.
111  *
112  * We're running kernel code in TSO for the moment so we don't need to worry
113  * about possible memory barrier bugs.
114  */
115 
116 #define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
117 #define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_PRIV|PSTATE_AG)
118 #define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_PRIV)
119 #define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
120 #define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
121 #define PSTATE_USER	(PSTATE_MM_RMO|PSTATE_IE)
122 
123 
124 /*
125  * SPARC V9 TSTATE register
126  *
127  *   39 32 31 24 23 18  17   8	7 5 4   0
128  *  +-----+-----+-----+--------+---+-----+
129  *  | CCR | ASI |  -  | PSTATE | - | CWP |
130  *  +-----+-----+-----+--------+---+-----+
131  */
132 
133 #define TSTATE_CWP		0x01f
134 #define TSTATE_PSTATE		0x6ff00
135 #define TSTATE_PSTATE_SHIFT	8
136 #define TSTATE_ASI		0xff000000LL
137 #define TSTATE_ASI_SHIFT	24
138 #define TSTATE_CCR		0xff00000000LL
139 #define TSTATE_CCR_SHIFT	32
140 
141 /* Leftover SPARC V8 PSTATE stuff */
142 #define PSR_ICC 0x00f00000
143 #define PSRCC_TO_TSTATE(x)	(((int64_t)(x)&PSR_ICC)<<(TSTATE_CCR_SHIFT-19))
144 #define TSTATECCR_TO_PSR(x)	(((x)&TSTATE_CCR)>>(TSTATE_CCR_SHIFT-19))
145 
146 /*
147  * These are here to simplify life.
148  */
149 #define TSTATE_IG	(PSTATE_IG<<TSTATE_PSTATE_SHIFT)
150 #define TSTATE_MG	(PSTATE_MG<<TSTATE_PSTATE_SHIFT)
151 #define TSTATE_CLE	(PSTATE_CLE<<TSTATE_PSTATE_SHIFT)
152 #define TSTATE_TLE	(PSTATE_TLE<<TSTATE_PSTATE_SHIFT)
153 #define TSTATE_MM	(PSTATE_MM<<TSTATE_PSTATE_SHIFT)
154 #define TSTATE_MM_TSO	(PSTATE_MM_TSO<<TSTATE_PSTATE_SHIFT)
155 #define TSTATE_MM_PSO	(PSTATE_MM_PSO<<TSTATE_PSTATE_SHIFT)
156 #define TSTATE_MM_RMO	(PSTATE_MM_RMO<<TSTATE_PSTATE_SHIFT)
157 #define TSTATE_RED	(PSTATE_RED<<TSTATE_PSTATE_SHIFT)
158 #define TSTATE_PEF	(PSTATE_PEF<<TSTATE_PSTATE_SHIFT)
159 #define TSTATE_AM	(PSTATE_AM<<TSTATE_PSTATE_SHIFT)
160 #define TSTATE_PRIV	(PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)
161 #define TSTATE_IE	(PSTATE_IE<<TSTATE_PSTATE_SHIFT)
162 #define TSTATE_AG	(PSTATE_AG<<TSTATE_PSTATE_SHIFT)
163 
164 #define TSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"
165 
166 #define TSTATE_KERN	((PSTATE_KERN)<<TSTATE_PSTATE_SHIFT)
167 #define TSTATE_USER	((PSTATE_USER)<<TSTATE_PSTATE_SHIFT)
168 /*
169  * SPARC V9 VER version register.
170  *
171  *  63   48 47  32 31  24 23 16 15    8 7 5 4      0
172  * +-------+------+------+-----+-------+---+--------+
173  * | manuf | impl | mask |  -  | maxtl | - | maxwin |
174  * +-------+------+------+-----+-------+---+--------+
175  *
176  */
177 
178 #define VER_MANUF	0xffff000000000000ULL
179 #define VER_MANUF_SHIFT	48
180 #define VER_IMPL	0x0000ffff00000000ULL
181 #define VER_IMPL_SHIFT	32
182 #define VER_MASK	0x00000000ff000000ULL
183 #define VER_MASK_SHIFT	24
184 #define VER_MAXTL	0x000000000000ff00ULL
185 #define VER_MAXTL_SHIFT	8
186 #define VER_MAXWIN	0x000000000000001fULL
187 
188 #define IMPL_SPARC64		0x01 /* SPARC64 */
189 #define IMPL_SPARC64_II		0x02 /* SPARC64-II */
190 #define IMPL_SPARC64_III	0x03 /* SPARC64-III */
191 #define IMPL_SPARC64_IV		0x04 /* SPARC64-IV */
192 #define IMPL_ZEUS		0x05 /* SPARC64-V */
193 #define IMPL_OLYMPUS_C		0x06 /* SPARC64-VI */
194 #define IMPL_JUPITER		0x07 /* SPARC64-VII */
195 #define IMPL_SPITFIRE		0x10 /* UltraSPARC */
196 #define IMPL_BLACKBIRD		0x11 /* UltraSPARC-II */
197 #define IMPL_SABRE		0x12 /* UltraSPARC-IIi */
198 #define IMPL_HUMMINGBIRD	0x13 /* UltraSPARC-IIe */
199 #define IMPL_CHEETAH		0x14 /* UltraSPARC-III */
200 #define IMPL_CHEETAH_PLUS	0x15 /* UltraSPARC-III+ */
201 #define IMPL_JALAPENO		0x16 /* UltraSPARC-IIIi */
202 #define IMPL_JAGUAR		0x18 /* UltraSPARC-IV */
203 #define IMPL_PANTHER		0x19 /* UltraSPARC-IV+ */
204 #define IMPL_SERRANO		0x22 /* UltraSPARC-IIIi+ */
205 
206 /*
207  * Here are a few things to help us transition between user and kernel mode:
208  */
209 
210 /* Memory models */
211 #define KERN_MM		PSTATE_MM_TSO
212 #define USER_MM		PSTATE_MM_RMO
213 
214 /*
215  * Register window handlers.  These point to generic routines that check the
216  * stack pointer and then vector to the real handler.  We could optimize this
217  * if we could guarantee only 32-bit or 64-bit stacks.
218  */
219 #define WSTATE_KERN	027
220 #define WSTATE_USER	022
221 
222 #define CWP		0x01f
223 
224 /* 64-byte alignment -- this seems the best place to put this. */
225 #define BLOCK_SIZE	64
226 #define BLOCK_ALIGN	0x3f
227 
#if defined(_KERNEL) && !defined(_LOCORE)

#ifdef DIAGNOSTIC
/*
 * Although this function is implemented in MI code, it must be in this MD
 * header because we don't want this header to include MI includes.
 */
void splassert_fail(int, int, const char *);
extern int splassert_ctl;	/* runtime knob: checking only when > 0 */
void splassert_check(int, const char *);
/*
 * Verify the current IPL against __wantipl via the MI
 * splassert_check(); compiled to a no-op kernel unless DIAGNOSTIC.
 */
#define splassert(__wantipl) do {			\
	if (splassert_ctl > 0) {			\
		splassert_check(__wantipl, __func__);	\
	}						\
} while (0)
#define splsoftassert(wantipl) splassert(wantipl)
#else
#define splassert(wantipl)	do { /* nada */ } while (0)
#define splsoftassert(wantipl)	do { /* nada */ } while (0)
#endif
248 
249 /*
250  * GCC pseudo-functions for manipulating privileged registers
251  */
252 static inline u_int64_t getpstate(void);
253 static inline u_int64_t
254 getpstate(void)
255 {
256 	return (sparc_rdpr(pstate));
257 }
258 
/* Write the %pstate privileged register. */
static inline void setpstate(u_int64_t);
static inline void
setpstate(u_int64_t newpstate)
{
	sparc_wrpr(pstate, newpstate, 0);
}
265 
/* Read the current register window pointer (%cwp). */
static inline int getcwp(void);
static inline int
getcwp(void)
{
	return (sparc_rdpr(cwp));
}
272 
/* Set the current register window pointer (%cwp). */
static inline void setcwp(u_int64_t);
static inline void
setcwp(u_int64_t newcwp)
{
	sparc_wrpr(cwp, newcwp, 0);
}
279 
/* Read the %ver version register (see the VER_* field masks above). */
static inline u_int64_t getver(void);
static inline u_int64_t
getver(void)
{
	return (sparc_rdpr(ver));
}
286 
/*
 * Disable interrupts by clearing PSTATE_IE and return the previous
 * %pstate value, to be handed back to intr_restore() later.
 */
static inline u_int64_t intr_disable(void);
static inline u_int64_t
intr_disable(void)
{
	u_int64_t s;

	s = sparc_rdpr(pstate);
	sparc_wrpr(pstate, s & ~PSTATE_IE, 0);
	return (s);
}
297 
/* Restore %pstate to a value previously saved by intr_disable(). */
static inline void intr_restore(u_int64_t);
static inline void
intr_restore(u_int64_t s)
{
	sparc_wrpr(pstate, s, 0);
}
304 
/*
 * Non-cacheable store to alternate space followed by a membar #Sync,
 * with interrupts disabled around the whole sequence so nothing can
 * slip in between the store and the barrier.
 */
static inline void stxa_sync(u_int64_t, u_int64_t, u_int64_t);
static inline void
stxa_sync(u_int64_t va, u_int64_t asi, u_int64_t val)
{
	u_int64_t s = intr_disable();
	stxa_nc(va, asi, val);
	__asm volatile("membar #Sync" : : : "memory");
	intr_restore(s);
}
314 
/*
 * Read the old %pil and set the new one in a single asm so the two
 * operations cannot be separated.  The "I" constraint requires newipl
 * to be a compile-time immediate; the trailing empty asm with a
 * "memory" clobber is a compiler-level barrier only.
 * NOTE(review): %g0 in the clobber list is the hardwired zero register
 * -- presumably harmless boilerplate; confirm before removing.
 */
static inline int
_spl(int newipl)
{
	int oldpil;

	__asm volatile(	"    rdpr %%pil, %0		\n"
			"    wrpr %%g0, %1, %%pil	\n"
	    : "=&r" (oldpil)
	    : "I" (newipl)
	    : "%g0");
	__asm volatile("" : : : "memory");

	return (oldpil);
}
329 
330 /* A non-priority-decreasing version of SPL */
331 static inline int
332 _splraise(int newpil)
333 {
334 	int oldpil;
335 
336 	oldpil = sparc_rdpr(pil);
337 	if (newpil > oldpil)
338 		sparc_wrpr(pil, newpil, 0);
339         return (oldpil);
340 }
341 
/* Unconditionally set %pil to newpil (may lower the level). */
static inline void
_splx(int newpil)
{
	sparc_wrpr(pil, newpil, 0);
}
347 
#endif /* _KERNEL && !_LOCORE */
349 
350 #endif /* _SPARC64_PSL_ */
351