/*	$NetBSD: psl.h,v 1.29 2001/06/08 01:33:32 uwe Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)psl.h	8.1 (Berkeley) 6/11/93
 */

#ifndef PSR_IMPL

/*
 * SPARC Process Status Register (in psl.h for hysterical raisins).  This
 * doesn't exist on the V9.
 *
 * The picture in the Sun manuals looks like this:
 *	                                     1 1
 *	 31   28 27   24 23   20 19       14 3 2 11    8 7 6 5 4       0
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 *	|  impl |  ver  |  icc  |  reserved |E|E|  pil  |S|P|E|   CWP   |
 *	|       |       |n z v c|           |C|F|       | |S|T|         |
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 */

#define PSR_IMPL	0xf0000000	/* implementation */
#define PSR_VER		0x0f000000	/* version */
#define PSR_ICC		0x00f00000	/* integer condition codes */
#define PSR_N		0x00800000	/* negative */
#define PSR_Z		0x00400000	/* zero */
#define PSR_O		0x00200000	/* overflow */
#define PSR_C		0x00100000	/* carry */
#define PSR_EC		0x00002000	/* coprocessor enable */
#define PSR_EF		0x00001000	/* FP enable */
#define PSR_PIL		0x00000f00	/* interrupt level */
#define PSR_S		0x00000080	/* supervisor (kernel) mode */
#define PSR_PS		0x00000040	/* previous supervisor mode (traps) */
#define PSR_ET		0x00000020	/* trap enable */
#define PSR_CWP		0x0000001f	/* current window pointer */

#define PSR_BITS "\20\16EC\15EF\10S\7PS\6ET"
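
/*
 * Illustrative sketch (not part of the original interface): pulling
 * individual fields out of a raw %psr value with the masks above.  The
 * example_* names are hypothetical and shown for demonstration only.
 */
#if 0	/* example only */
static __inline int
example_psr_pil(int psr)
{
	/* The PIL field occupies bits 11:8; shift down to a 0-15 level. */
	return ((psr & PSR_PIL) >> 8);
}

static __inline int
example_psr_is_supervisor(int psr)
{
	/* Nonzero iff the PSR says we are in supervisor (kernel) mode. */
	return ((psr & PSR_S) != 0);
}
#endif	/* example only */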

/* Interesting spl()s */
#define PIL_SCSI	3
#define PIL_FDSOFT	4
#define PIL_AUSOFT	4
#define PIL_BIO		5
#define PIL_VIDEO	5
#define PIL_TTY		6
#define PIL_LPT		6
#define PIL_NET		7
#define PIL_CLOCK	10
#define PIL_FD		11
#define PIL_SER		13
#define	PIL_AUD		13
#define PIL_HIGH	15
#define PIL_SCHED	PIL_CLOCK
#define PIL_LOCK	PIL_HIGH
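
/*
 * Illustrative sketch: a PIL_* level sits in the PSR_PIL field once it is
 * shifted left by 8, which is how the _SPLSET()/_SPLRAISE() macros further
 * down install and compare levels.  Hypothetical helper name.
 */
#if 0	/* example only */
static __inline int
example_pil_to_psr_bits(int level)
{
	/* e.g. PIL_TTY (6) becomes 0x600, which fits within PSR_PIL (0xf00). */
	return ((level << 8) & PSR_PIL);
}
#endif	/* example only */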

/*
 * SPARC V9 CCR register
 */

#define ICC_C	0x01L
#define ICC_V	0x02L
#define ICC_Z	0x04L
#define ICC_N	0x08L
#define XCC_SHIFT	4
#define XCC_C	(ICC_C<<XCC_SHIFT)
#define XCC_V	(ICC_V<<XCC_SHIFT)
#define XCC_Z	(ICC_Z<<XCC_SHIFT)
#define XCC_N	(ICC_N<<XCC_SHIFT)
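
/*
 * Illustrative sketch: the V9 CCR packs the 32-bit condition codes (icc)
 * into bits 3:0 and the 64-bit ones (xcc) into bits 7:4.  Hypothetical
 * helper, not part of this header.
 */
#if 0	/* example only */
static __inline int
example_ccr_xcc_negative(int ccr)
{
	/* Nonzero iff the last 64-bit result set the negative flag. */
	return ((ccr & XCC_N) != 0);
}
#endif	/* example only */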


/*
 * SPARC V9 PSTATE register (what replaces the PSR in V9)
 *
 * Here's the layout:
 *
 *    11   10    9     8   7  6   5     4     3     2     1   0
 *  +------------------------------------------------------------+
 *  | IG | MG | CLE | TLE | MM | RED | PEF | AM | PRIV | IE | AG |
 *  +------------------------------------------------------------+
 */

#define PSTATE_IG	0x800	/* enable spitfire interrupt globals */
#define PSTATE_MG	0x400	/* enable spitfire MMU globals */
#define PSTATE_CLE	0x200	/* current little endian */
#define PSTATE_TLE	0x100	/* traps little endian */
#define PSTATE_MM	0x0c0	/* memory model */
#define PSTATE_MM_TSO	0x000	/* total store order */
#define PSTATE_MM_PSO	0x040	/* partial store order */
#define PSTATE_MM_RMO	0x080	/* relaxed memory order */
#define PSTATE_RED	0x020	/* RED state */
#define PSTATE_PEF	0x010	/* enable floating point */
#define PSTATE_AM	0x008	/* 32-bit address masking */
#define PSTATE_PRIV	0x004	/* privileged mode */
#define PSTATE_IE	0x002	/* interrupt enable */
#define PSTATE_AG	0x001	/* enable alternate globals */

#define PSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"
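
/*
 * Illustrative sketch: testing PSTATE bits.  A trap handler might, for
 * instance, want to know whether it interrupted privileged code that had
 * interrupts enabled.  Hypothetical helper name.
 */
#if 0	/* example only */
static __inline int
example_pstate_priv_intr(int pstate)
{
	return ((pstate & (PSTATE_PRIV|PSTATE_IE)) ==
	    (PSTATE_PRIV|PSTATE_IE));
}
#endif	/* example only */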


/*
 * 32-bit code requires TSO or at best PSO since that's what's supported on
 * SPARC V8 and earlier machines.
 *
 * 64-bit code sets the memory model in the ELF header.
 *
 * We're running kernel code in TSO for the moment so we don't need to worry
 * about possible memory barrier bugs.
 */

#ifdef __arch64__
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_PRIV|PSTATE_AG)
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define PSTATE_USER	(PSTATE_MM_RMO|PSTATE_IE)
#else
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV|PSTATE_AG)
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV)
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define PSTATE_USER	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#endif
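
/*
 * Illustrative sketch: replacing just the memory-model field of a PSTATE
 * value, e.g. forcing TSO for kernel code or RMO for 64-bit user code as
 * described above.  Hypothetical helper, for demonstration only.
 */
#if 0	/* example only */
static __inline int
example_pstate_set_mm(int pstate, int mm)
{
	/* mm is one of PSTATE_MM_TSO, PSTATE_MM_PSO or PSTATE_MM_RMO. */
	return ((pstate & ~PSTATE_MM) | (mm & PSTATE_MM));
}
#endif	/* example only */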

/*
 * SPARC V9 TSTATE register
 *
 *   39 32 31 24 23 18  17   8	7 5 4   0
 *  +-----+-----+-----+--------+---+-----+
 *  | CCR | ASI |  -  | PSTATE | - | CWP |
 *  +-----+-----+-----+--------+---+-----+
 */

#define TSTATE_CWP		0x01f
#define TSTATE_PSTATE		0x6ff00
#define TSTATE_PSTATE_SHIFT	8
#define TSTATE_ASI		0xff000000LL
#define TSTATE_ASI_SHIFT	24
#define TSTATE_CCR		0xff00000000LL
#define TSTATE_CCR_SHIFT	32

#define PSRCC_TO_TSTATE(x)	(((int64_t)(x)&PSR_ICC)<<(TSTATE_CCR_SHIFT-19))
#define TSTATECCR_TO_PSR(x)	(((x)&TSTATE_CCR)>>(TSTATE_CCR_SHIFT-19))

/*
 * These are here to simplify life.
 */
#define TSTATE_IG	(PSTATE_IG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MG	(PSTATE_MG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_CLE	(PSTATE_CLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_TLE	(PSTATE_TLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM	(PSTATE_MM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_TSO	(PSTATE_MM_TSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_PSO	(PSTATE_MM_PSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_RMO	(PSTATE_MM_RMO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_RED	(PSTATE_RED<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PEF	(PSTATE_PEF<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AM	(PSTATE_AM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PRIV	(PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)
#define TSTATE_IE	(PSTATE_IE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AG	(PSTATE_AG<<TSTATE_PSTATE_SHIFT)

#define TSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"

#define TSTATE_KERN	((PSTATE_KERN)<<TSTATE_PSTATE_SHIFT)
#define TSTATE_USER	((PSTATE_USER)<<TSTATE_PSTATE_SHIFT)
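
/*
 * Illustrative sketch: unpacking fields from a 64-bit TSTATE value with
 * the masks and shifts defined above.  Hypothetical helpers, not part of
 * this header's interface.
 */
#if 0	/* example only */
static __inline int
example_tstate_pstate(int64_t tstate)
{
	return ((int)((tstate & TSTATE_PSTATE) >> TSTATE_PSTATE_SHIFT));
}

static __inline int
example_tstate_cwp(int64_t tstate)
{
	return ((int)(tstate & TSTATE_CWP));
}
#endif	/* example only */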
/*
 * SPARC V9 VER version register.
 *
 *  63   48 47  32 31  24 23 16 15    8 7 5 4      0
 * +-------+------+------+-----+-------+---+--------+
 * | manuf | impl | mask |  -  | maxtl | - | maxwin |
 * +-------+------+------+-----+-------+---+--------+
 *
 */

#define VER_MANUF	0xffff000000000000LL
#define VER_MANUF_SHIFT	48
#define VER_IMPL	0x0000ffff00000000LL
#define VER_IMPL_SHIFT	32
#define VER_MASK	0x00000000ff000000LL
#define VER_MASK_SHIFT	24
#define VER_MAXTL	0x000000000000ff00LL
#define VER_MAXTL_SHIFT	8
#define VER_MAXWIN	0x000000000000001fLL
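
/*
 * Illustrative sketch: decoding a couple of VER fields.  Hypothetical
 * helpers; the window-count comment follows the V9 definition of maxwin
 * as NWINDOWS - 1.
 */
#if 0	/* example only */
static __inline int
example_ver_impl(int64_t ver)
{
	return ((int)((ver & VER_IMPL) >> VER_IMPL_SHIFT));
}

static __inline int
example_ver_nwindows(int64_t ver)
{
	/* maxwin holds the highest window number, so add one. */
	return ((int)(ver & VER_MAXWIN) + 1);
}
#endif	/* example only */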

/*
 * Here are a few things to help us transition between user and kernel mode:
 */

/* Memory models */
#define KERN_MM		PSTATE_MM_TSO
#define USER_MM		PSTATE_MM_RMO

/*
 * Register window handlers.  These point to generic routines that check the
 * stack pointer and then vector to the real handler.  We could optimize this
 * if we could guarantee only 32-bit or 64-bit stacks.
 */
#define WSTATE_KERN	026
#define WSTATE_USER	022

#define CWP		0x01f

/* 64-byte alignment -- this seems the best place to put this. */
#define BLOCK_SIZE	64
#define BLOCK_ALIGN	0x3f
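
/*
 * Illustrative sketch: rounding an address up to the next 64-byte block
 * boundary with BLOCK_SIZE/BLOCK_ALIGN.  Hypothetical helper name.
 */
#if 0	/* example only */
static __inline unsigned long
example_block_roundup(unsigned long addr)
{
	/* BLOCK_ALIGN is BLOCK_SIZE - 1, so this masks off the low bits. */
	return ((addr + BLOCK_ALIGN) & ~(unsigned long)BLOCK_ALIGN);
}
#endif	/* example only */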

#if defined(_KERNEL) && !defined(_LOCORE)

static __inline int getpsr __P((void));
static __inline void setpsr __P((int));
static __inline void spl0 __P((void));
static __inline int splhigh __P((void));
static __inline void splx __P((int));
static __inline int getmid __P((void));

/*
 * GCC pseudo-functions for manipulating PSR (primarily PIL field).
 */
static __inline int getpsr()
{
	int psr;

	__asm __volatile("rd %%psr,%0" : "=r" (psr));
	return (psr);
}

static __inline int getmid()
{
	int mid;

	__asm __volatile("rd %%tbr,%0" : "=r" (mid));
	return ((mid >> 20) & 0x3);
}

static __inline void setpsr(newpsr)
	int newpsr;
{
	__asm __volatile("wr %0,0,%%psr" : : "r" (newpsr));
	__asm __volatile("nop; nop; nop");
}
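
/*
 * Illustrative sketch (hypothetical helper, not part of this header): a
 * read-modify-write of the PIL field built on getpsr()/setpsr(), which is
 * essentially what the _SPLSET/_SPLRAISE macros below do inline.
 */
#if 0	/* example only */
static __inline int
example_setipl(int newipl)
{
	int psr = getpsr();
	int oldipl = psr & PSR_PIL;

	setpsr((psr & ~PSR_PIL) | ((newipl << 8) & PSR_PIL));
	return (oldipl >> 8);	/* previous level, 0-15 */
}
#endif	/* example only */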

static __inline void spl0()
{
	int psr, oldipl;

	/*
	 * wrpsr xors two values: we choose old psr and old ipl here,
	 * which gives us the same value as the old psr but with all
	 * the old PIL bits turned off.
	 */
	__asm __volatile("rd %%psr,%0" : "=r" (psr));
	oldipl = psr & PSR_PIL;
	__asm __volatile("wr %0,%1,%%psr" : : "r" (psr), "r" (oldipl));

	/*
	 * Three instructions must execute before we can depend
	 * on the bits to be changed.
	 */
	__asm __volatile("nop; nop; nop");
}
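
/*
 * Illustrative sketch of the wrpsr xor identity spl0() relies on: since
 * oldipl == (psr & PSR_PIL), psr ^ oldipl is psr with the PIL field
 * cleared.  Hypothetical helper, shown only to make the trick explicit.
 */
#if 0	/* example only */
static __inline int
example_clear_pil(int psr)
{
	return (psr ^ (psr & PSR_PIL));		/* same as psr & ~PSR_PIL */
}
#endif	/* example only */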

/*
 * PIL 1 through 14 can use this macro.
 * (spl0 and splhigh are special since they put all 0s or all 1s
 * into the ipl field.)
 */
#define	_SPLSET(name, newipl) \
static __inline void name __P((void)); \
static __inline void name() \
{ \
	int psr, oldipl; \
	__asm __volatile("rd %%psr,%0" : "=r" (psr)); \
	oldipl = psr & PSR_PIL; \
	psr &= ~oldipl; \
	__asm __volatile("wr %0,%1,%%psr" : : \
	    "r" (psr), "n" ((newipl) << 8)); \
	__asm __volatile("nop; nop; nop"); \
}

/* Raise IPL and return previous value */
#define	_SPLRAISE(name, newipl) \
static __inline int name __P((void)); \
static __inline int name() \
{ \
	int psr, oldipl; \
	__asm __volatile("rd %%psr,%0" : "=r" (psr)); \
	oldipl = psr & PSR_PIL; \
	if ((newipl << 8) <= oldipl) \
		return (oldipl); \
	psr &= ~oldipl; \
	__asm __volatile("wr %0,%1,%%psr" : : \
	    "r" (psr), "n" ((newipl) << 8)); \
	__asm __volatile("nop; nop; nop"); \
	return (oldipl); \
}

_SPLSET(spllowersoftclock, 1)

_SPLRAISE(splsoftint, 1)
#define	splsoftclock	splsoftint
#define	splsoftnet	splsoftint


/* audio software interrupts */
_SPLRAISE(splausoft, PIL_AUSOFT)

/* floppy software interrupts */
_SPLRAISE(splfdsoft, PIL_FDSOFT)

/* Block devices */
_SPLRAISE(splbio, 5)

/* tty input runs at software level 6 */
_SPLRAISE(spltty, PIL_TTY)

/* network hardware interrupts are at level 7 */
_SPLRAISE(splnet, PIL_NET)

/*
 * Memory allocation (must be as high as highest network, tty, or disk device)
 */
_SPLRAISE(splvm, 7)

/* clock interrupts at level 10 */
_SPLRAISE(splclock, PIL_CLOCK)

/* fd hardware, ts102, and tadpole microcontroller interrupts are at level 11 */
_SPLRAISE(splfd, 11)
_SPLRAISE(splts102, 11)

/* zs hardware interrupts are at level 12 */
_SPLRAISE(splzs, 12)

/* su (com) hardware interrupts are at level 13 (protects zs as well) */
_SPLRAISE(splserial, 13)

/* audio hardware interrupts are at level 13 */
_SPLRAISE(splaudio, 13)

/* second sparc timer interrupts at level 14 */
_SPLRAISE(splstatclock, 14)

static __inline int splhigh()
{
	int psr, oldipl;

	__asm __volatile("rd %%psr,%0" : "=r" (psr));
	__asm __volatile("wr %0,0,%%psr" : : "r" (psr | PSR_PIL));
	__asm __volatile("and %1,%2,%0; nop; nop" : "=r" (oldipl) : \
	    "r" (psr), "n" (PSR_PIL));
	return (oldipl);
}

#define	splsched()	splhigh()
#define	spllock()	splhigh()

/* splx does not have a return value */
static __inline void splx(newipl)
	int newipl;
{
	int psr;

	__asm __volatile("rd %%psr,%0" : "=r" (psr));
	__asm __volatile("wr %0,%1,%%psr" : : \
	    "r" (psr & ~PSR_PIL), "rn" (newipl));
	__asm __volatile("nop; nop; nop");
}
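
/*
 * Illustrative sketch of the usual raise/restore pattern built on the
 * functions above: save the previous level, do the critical work, then
 * put it back with splx().  Hypothetical function, for demonstration.
 */
#if 0	/* example only */
static __inline void
example_critical_section(void)
{
	int s = splhigh();	/* block all interrupts */

	/* ... touch data shared with interrupt handlers here ... */
	splx(s);		/* restore the previous interrupt level */
}
#endif	/* example only */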
#endif /* _KERNEL && !_LOCORE */

#endif /* PSR_IMPL */