/*	$NetBSD: psl.h,v 1.45 2009/05/16 17:16:12 martin Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)psl.h	8.1 (Berkeley) 6/11/93
 */

#ifndef PSR_IMPL

/*
 * SPARC Processor State Register (in psl.h for hysterical raisins).  This
 * doesn't exist on the V9.
 *
 * The picture in the Sun manuals looks like this:
 *	                                     1 1
 *	 31   28 27   24 23   20 19       14 3 2 11    8 7 6 5 4       0
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 *	|  impl |  ver  |  icc  |  reserved |E|E|  pil  |S|P|E|   CWP   |
 *	|       |       |n z v c|           |C|F|       | |S|T|         |
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 */

#define PSR_IMPL	0xf0000000	/* implementation */
#define PSR_VER		0x0f000000	/* version */
#define PSR_ICC		0x00f00000	/* integer condition codes */
#define PSR_N		0x00800000	/* negative */
#define PSR_Z		0x00400000	/* zero */
#define PSR_O		0x00200000	/* overflow */
#define PSR_C		0x00100000	/* carry */
#define PSR_EC		0x00002000	/* coprocessor enable */
#define PSR_EF		0x00001000	/* FP enable */
#define PSR_PIL		0x00000f00	/* interrupt level */
#define PSR_S		0x00000080	/* supervisor (kernel) mode */
#define PSR_PS		0x00000040	/* previous supervisor mode (traps) */
#define PSR_ET		0x00000020	/* trap enable */
#define PSR_CWP		0x0000001f	/* current window pointer */

#define PSR_BITS "\20\16EC\15EF\10S\7PS\6ET"
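
/*
 * Illustrative sketch (not part of the original header): extracting
 * the PIL and CWP fields from a raw %psr image with the masks above.
 * The helper names are hypothetical.
 */
#if 0
static __inline int
psr_pil(int psr)
{
	/* PIL occupies bits 11:8; shift down to get 0..15. */
	return ((psr & PSR_PIL) >> 8);
}

static __inline int
psr_cwp(int psr)
{
	/* CWP sits in the low five bits. */
	return (psr & PSR_CWP);
}
#endif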

/*
 * SPARC V9 CCR register
 */

#define ICC_C	0x01L
#define ICC_V	0x02L
#define ICC_Z	0x04L
#define ICC_N	0x08L
#define XCC_SHIFT	4
#define XCC_C	(ICC_C<<XCC_SHIFT)
#define XCC_V	(ICC_V<<XCC_SHIFT)
#define XCC_Z	(ICC_Z<<XCC_SHIFT)
#define XCC_N	(ICC_N<<XCC_SHIFT)
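
/*
 * Illustrative sketch: the xcc masks are just the icc masks shifted up
 * by XCC_SHIFT, so a hypothetical "64-bit result was zero" test on a
 * CCR image checks XCC_Z.
 */
#if 0
static __inline int
ccr_xcc_zero(int ccr)
{
	return ((ccr & XCC_Z) != 0);
}
#endif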


/*
 * SPARC V9 PSTATE register (what replaces the PSR in V9)
 *
 * Here's the layout:
 *
 *    11   10    9     8   7  6   5     4     3     2     1   0
 *  +------------------------------------------------------------+
 *  | IG | MG | CLE | TLE | MM | RED | PEF | AM | PRIV | IE | AG |
 *  +------------------------------------------------------------+
 */

#define PSTATE_IG	0x800	/* enable spitfire interrupt globals */
#define PSTATE_MG	0x400	/* enable spitfire MMU globals */
#define PSTATE_CLE	0x200	/* current little endian */
#define PSTATE_TLE	0x100	/* traps little endian */
#define PSTATE_MM	0x0c0	/* memory model */
#define PSTATE_MM_TSO	0x000	/* total store order */
#define PSTATE_MM_PSO	0x040	/* partial store order */
#define PSTATE_MM_RMO	0x080	/* relaxed memory order */
#define PSTATE_RED	0x020	/* RED state */
#define PSTATE_PEF	0x010	/* enable floating point */
#define PSTATE_AM	0x008	/* 32-bit address masking */
#define PSTATE_PRIV	0x004	/* privileged mode */
#define PSTATE_IE	0x002	/* interrupt enable */
#define PSTATE_AG	0x001	/* enable alternate globals */

#define PSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"


/*
 * 32-bit code requires TSO or at best PSO since that's what's supported on
 * SPARC V8 and earlier machines.
 *
 * 64-bit code sets the memory model in the ELF header.
 *
 * We're running kernel code in TSO for the moment so we don't need to worry
 * about possible memory barrier bugs.
 */

#ifdef __arch64__
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_PRIV|PSTATE_AG)
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define PSTATE_USER	(PSTATE_MM_RMO|PSTATE_IE)
#else
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV|PSTATE_AG)
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV)
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define PSTATE_USER	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#endif
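
/*
 * Illustrative sketch: with the composites above, the 64-bit kernel
 * runs at PSTATE_KERN == 0x004 (TSO, privileged, interrupts disabled)
 * and takes interrupts at PSTATE_INTR == 0x006.  A hypothetical check
 * of a saved %pstate for privilege looks like this:
 */
#if 0
static __inline int
pstate_was_privileged(int pstate)
{
	return ((pstate & PSTATE_PRIV) != 0);
}
#endif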

/*
 * SPARC V9 TSTATE register
 *
 *   39 32 31 24 23 18  17   8	7 5 4   0
 *  +-----+-----+-----+--------+---+-----+
 *  | CCR | ASI |  -  | PSTATE | - | CWP |
 *  +-----+-----+-----+--------+---+-----+
 */

#define TSTATE_CWP		0x01f
#define TSTATE_PSTATE		0x6ff00
#define TSTATE_PSTATE_SHIFT	8
#define TSTATE_ASI		0xff000000LL
#define TSTATE_ASI_SHIFT	24
#define TSTATE_CCR		0xff00000000LL
#define TSTATE_CCR_SHIFT	32

/* icc sits in PSR bits 23:20 and in TSTATE bits 35:32, a shift of 12. */
#define PSRCC_TO_TSTATE(x)	(((int64_t)(x)&PSR_ICC)<<(TSTATE_CCR_SHIFT-20))
#define TSTATECCR_TO_PSR(x)	(((x)&TSTATE_CCR)>>(TSTATE_CCR_SHIFT-20))
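
/*
 * Illustrative sketch: moving the 32-bit condition codes into a 64-bit
 * TSTATE image with the macros above.  Clearing all of TSTATE_CCR also
 * clears xcc, which the 32-bit PSR has no equivalent of.  The helper
 * name is hypothetical.
 */
#if 0
static __inline int64_t
merge_psr_icc(int64_t tstate, int psr)
{
	return ((tstate & ~TSTATE_CCR) | PSRCC_TO_TSTATE(psr));
}
#endif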

/*
 * These are here to simplify life.
 */
#define TSTATE_IG	(PSTATE_IG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MG	(PSTATE_MG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_CLE	(PSTATE_CLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_TLE	(PSTATE_TLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM	(PSTATE_MM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_TSO	(PSTATE_MM_TSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_PSO	(PSTATE_MM_PSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_RMO	(PSTATE_MM_RMO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_RED	(PSTATE_RED<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PEF	(PSTATE_PEF<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AM	(PSTATE_AM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PRIV	(PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)
#define TSTATE_IE	(PSTATE_IE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AG	(PSTATE_AG<<TSTATE_PSTATE_SHIFT)

#define TSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"

#define TSTATE_KERN	((PSTATE_KERN)<<TSTATE_PSTATE_SHIFT)
#define TSTATE_USER	((PSTATE_USER)<<TSTATE_PSTATE_SHIFT)
/*
 * SPARC V9 VER version register.
 *
 *  63   48 47  32 31  24 23 16 15    8 7 5 4      0
 * +-------+------+------+-----+-------+---+--------+
 * | manuf | impl | mask |  -  | maxtl | - | maxwin |
 * +-------+------+------+-----+-------+---+--------+
 */

#define VER_MANUF	0xffff000000000000LL
#define VER_MANUF_SHIFT	48
#define VER_IMPL	0x0000ffff00000000LL
#define VER_IMPL_SHIFT	32
#define VER_MASK	0x00000000ff000000LL
#define VER_MASK_SHIFT	24
#define VER_MAXTL	0x000000000000ff00LL
#define VER_MAXTL_SHIFT	8
#define VER_MAXWIN	0x000000000000001fLL
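
/*
 * Illustrative sketch: decoding %ver fields.  Per the V9 spec, maxwin
 * holds NWINDOWS - 1, so the register window count is maxwin + 1.  The
 * helper names are hypothetical.
 */
#if 0
static __inline int
ver_nwindows(int64_t ver)
{
	return ((int)(ver & VER_MAXWIN) + 1);
}

static __inline int
ver_impl(int64_t ver)
{
	return ((int)((ver & VER_IMPL) >> VER_IMPL_SHIFT));
}
#endif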

/*
 * Here are a few things to help us transition between user and kernel mode:
 */

/* Memory models */
#define KERN_MM		PSTATE_MM_TSO
#define USER_MM		PSTATE_MM_RMO

/*
 * Register window handlers.  These point to generic routines that check the
 * stack pointer and then vector to the real handler.  We could optimize this
 * if we could guarantee only 32-bit or 64-bit stacks.
 */
#define WSTATE_KERN	026
#define WSTATE_USER	022

#define CWP		0x01f

/* 64-byte alignment -- this seems the best place to put this. */
#define BLOCK_SIZE	64
#define BLOCK_ALIGN	0x3f
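
/*
 * Illustrative sketch: BLOCK_ALIGN is the mask form of BLOCK_SIZE
 * (BLOCK_SIZE - 1), so rounding an address up to the next 64-byte
 * boundary is the usual mask trick.  The macro name is hypothetical.
 */
#if 0
#define BLOCK_ROUND_UP(x)	(((x) + BLOCK_ALIGN) & ~BLOCK_ALIGN)
#endif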

#if defined(_KERNEL) && !defined(_LOCORE)

/*
 * GCC pseudo-functions for manipulating PSR (primarily PIL field).
 */
static __inline int
getpsr(void)
{
	int psr;

	__asm volatile("rd %%psr,%0" : "=r" (psr));
	return (psr);
}

static __inline int
getmid(void)
{
	int mid;

	__asm volatile("rd %%tbr,%0" : "=r" (mid));
	return ((mid >> 20) & 0x3);
}

static __inline void
setpsr(int newpsr)
{
	__asm volatile("wr %0,0,%%psr" : : "r" (newpsr) : "memory");
	__asm volatile("nop; nop; nop");
}

static __inline void
spl0(void)
{
	int psr, oldipl;

	/*
	 * wrpsr xors two values: we choose old psr and old ipl here,
	 * which gives us the same value as the old psr but with all
	 * the old PIL bits turned off.
	 */
	__asm volatile("rd %%psr,%0" : "=r" (psr) : : "memory");
	oldipl = psr & PSR_PIL;
	__asm volatile("wr %0,%1,%%psr" : : "r" (psr), "r" (oldipl));

	/*
	 * Three instructions must execute before we can depend
	 * on the bits to be changed.
	 */
	__asm volatile("nop; nop; nop");
}

/*
 * PIL 1 through 14 can use this macro.
 * (spl0 and splhigh are special since they put all 0s or all 1s
 * into the ipl field.)
 */
#define	_SPLSET(name, newipl) \
static __inline void name(void) \
{ \
	int psr; \
	__asm volatile("rd %%psr,%0" : "=r" (psr)); \
	psr &= ~PSR_PIL; \
	__asm volatile("wr %0,%1,%%psr" : : \
	    "r" (psr), "n" ((newipl) << 8)); \
	__asm volatile("nop; nop; nop" : : : "memory"); \
}

_SPLSET(spllowerschedclock, IPL_SCHED)

typedef uint8_t ipl_t;
typedef struct {
	ipl_t _ipl;
} ipl_cookie_t;

static inline ipl_cookie_t
makeiplcookie(ipl_t ipl)
{

	return (ipl_cookie_t){._ipl = ipl};
}

/* Raise IPL and return previous value */
static __inline int
splraiseipl(ipl_cookie_t icookie)
{
	int newipl = icookie._ipl;
	int psr, oldipl;

	__asm volatile("rd %%psr,%0" : "=r" (psr));

	oldipl = psr & PSR_PIL;
	newipl <<= 8;
	if (newipl <= oldipl)
		return (oldipl);

	psr = (psr & ~oldipl) | newipl;

	__asm volatile("wr %0,0,%%psr" : : "r" (psr));
	__asm volatile("nop; nop; nop" : : : "memory");

	return (oldipl);
}

#include <sys/spl.h>

#define	splausoft()	splraiseipl(makeiplcookie(IPL_SOFTAUDIO))
#define	splfdsoft()	splraiseipl(makeiplcookie(IPL_SOFTFDC))

#define	splfd()		splraiseipl(makeiplcookie(IPL_FD))
#define	splts102()	splraiseipl(makeiplcookie(IPL_TS102))

#define	splzs()		splraiseipl(makeiplcookie(IPL_ZS))

/* splx does not have a return value */
static __inline void
splx(int newipl)
{
	int psr;

	__asm volatile("rd %%psr,%0" : "=r" (psr) : : "memory");
	__asm volatile("wr %0,%1,%%psr" : : \
	    "r" (psr & ~PSR_PIL), "rn" (newipl));
	__asm volatile("nop; nop; nop");
}
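
/*
 * Illustrative sketch of the usual raise/restore idiom built from the
 * primitives above: raise the IPL around a critical section and let
 * splx() put the old level back.  The function is hypothetical.
 */
#if 0
static void
example_critical_section(void)
{
	int s;

	s = splzs();	/* block zs interrupts; returns the old PIL bits */
	/* ... touch data shared with the zs interrupt handler ... */
	splx(s);	/* restore the previous interrupt level */
}
#endif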
#endif /* _KERNEL && !_LOCORE */

#endif /* PSR_IMPL */