/*-
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2008 The DragonFly Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)types.h	8.3 (Berkeley) 1/5/94
 * $FreeBSD: src/sys/i386/include/types.h,v 1.19.2.1 2001/03/21 10:50:58 peter Exp $
 */

#ifndef _CPU_TYPES_H_
#define	_CPU_TYPES_H_

#include <machine/stdint.h>

#if defined(__x86_64__)
typedef	__int64_t	__segsz_t;	/* segment size */
typedef	__int64_t	register_t;
typedef	__uint64_t	u_register_t;
#elif defined(__i386__)
typedef	__int32_t	__segsz_t;	/* segment size */
typedef	__int32_t	register_t;
typedef	__uint32_t	u_register_t;
#endif

typedef unsigned long	vm_offset_t;	/* address space bounded offset */
typedef unsigned long	vm_size_t;	/* address space bounded size */

typedef __uint64_t	vm_pindex_t;	/* physical page index */
typedef	__int64_t	vm_ooffset_t;	/* VM object bounded offset */
typedef __uint64_t	vm_poff_t;	/* physical offset */
typedef __uint64_t	vm_paddr_t;	/* physical addr (same as vm_poff_t) */

#ifdef _KERNEL
typedef	__int64_t	intfptr_t;
typedef	__uint64_t	uintfptr_t;
#endif

/*
 * MMU page tables
 */
typedef __uint64_t	pml4_entry_t;
typedef __uint64_t	pdp_entry_t;
typedef __uint64_t	pd_entry_t;
typedef __uint64_t	pt_entry_t;
typedef __uint32_t	cpulock_t;	/* count and exclusive lock */

/*
 * cpumask_t - a mask representing a set of cpus and supporting routines.
 *
 * WARNING! It is recommended that this mask NOT be made variably-sized
 *	    because it affects a huge number of system structures.  However,
 *	    kernel code (non-module) can be optimized to not operate on the
 *	    whole mask.
 */

#define CPUMASK_ELEMENTS	4	/* tested by assembly for #error */

typedef struct {
	__uint64_t	ary[CPUMASK_ELEMENTS];
} cpumask_t;
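
/*
 * Illustrative usage sketch (comment only, not part of the API): with the
 * macros defined below under _KERNEL/_KERNEL_STRUCTURES, a cpumask is
 * typically built and tested along these lines, for some cpu id n:
 *
 *	cpumask_t mask;
 *
 *	CPUMASK_ASSZERO(mask);			(empty the set)
 *	CPUMASK_ORBIT(mask, n);			(add cpu n)
 *	if (CPUMASK_TESTBIT(mask, n))		(is cpu n present?)
 *		CPUMASK_NANDBIT(mask, n);	(remove cpu n)
 */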

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

#define CPUMASK_INITIALIZER_ALLONES	{ .ary = { (__uint64_t)-1, \
					  (__uint64_t)-1, \
					  (__uint64_t)-1, \
					  (__uint64_t)-1 } }
#define CPUMASK_INITIALIZER_ONLYONE	{ .ary = { 1, 0, 0, 0 } }

#define CPUMASK_SIMPLE(cpu)	((__uint64_t)1 << (cpu))

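/*
 * BSRCPUMASK() and BSFCPUMASK() return the index of the highest and
 * lowest set bit in the mask, respectively.  bsrq()/bsfq() are assumed
 * to be the 64-bit bit-scan helpers (BSR/BSF) from <machine/cpufunc.h>;
 * as with the underlying instructions, the result is undefined when the
 * whole mask is zero, so callers should check CPUMASK_TESTNZERO() first.
 */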
#define BSRCPUMASK(val)		((val).ary[3] ? 192 + bsrq((val).ary[3]) : \
				((val).ary[2] ? 128 + bsrq((val).ary[2]) : \
				((val).ary[1] ? 64 + bsrq((val).ary[1]) : \
						bsrq((val).ary[0]))))

#define BSFCPUMASK(val)		((val).ary[0] ? bsfq((val).ary[0]) : \
				((val).ary[1] ? 64 + bsfq((val).ary[1]) : \
				((val).ary[2] ? 128 + bsfq((val).ary[2]) : \
						192 + bsfq((val).ary[3]))))

#define CPUMASK_CMPMASKEQ(val1, val2)	((val1).ary[0] == (val2).ary[0] && \
					 (val1).ary[1] == (val2).ary[1] && \
					 (val1).ary[2] == (val2).ary[2] && \
					 (val1).ary[3] == (val2).ary[3])

#define CPUMASK_CMPMASKNEQ(val1, val2)	((val1).ary[0] != (val2).ary[0] || \
					 (val1).ary[1] != (val2).ary[1] || \
					 (val1).ary[2] != (val2).ary[2] || \
					 (val1).ary[3] != (val2).ary[3])

#define CPUMASK_ISUP(val)		((val).ary[0] == 1 && \
					 (val).ary[1] == 0 && \
					 (val).ary[2] == 0 && \
					 (val).ary[3] == 0)

#define CPUMASK_TESTZERO(val)		((val).ary[0] == 0 && \
					 (val).ary[1] == 0 && \
					 (val).ary[2] == 0 && \
					 (val).ary[3] == 0)

#define CPUMASK_TESTNZERO(val)		((val).ary[0] != 0 || \
					 (val).ary[1] != 0 || \
					 (val).ary[2] != 0 || \
					 (val).ary[3] != 0)

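/*
 * For the single-bit operations below, ((i) >> 6) & 3 selects one of the
 * four 64-bit words and (i) & 63 selects the bit within that word; cpu
 * ids are expected to stay below CPUMASK_ELEMENTS * 64 (256).
 */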
#define CPUMASK_TESTBIT(val, i)		((val).ary[((i) >> 6) & 3] & \
					 CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_TESTMASK(val1, val2)	(((val1).ary[0] & (val2).ary[0]) || \
					 ((val1).ary[1] & (val2).ary[1]) || \
					 ((val1).ary[2] & (val2).ary[2]) || \
					 ((val1).ary[3] & (val2).ary[3]))

#define CPUMASK_LOWMASK(val)		((val).ary[0])

#define CPUMASK_ORBIT(mask, i)		((mask).ary[((i) >> 6) & 3] |= \
					 CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_ANDBIT(mask, i)		((mask).ary[((i) >> 6) & 3] &= \
					 CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_NANDBIT(mask, i)	((mask).ary[((i) >> 6) & 3] &= \
					 ~CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_ASSZERO(mask)		do {				\
					(mask).ary[0] = 0;		\
					(mask).ary[1] = 0;		\
					(mask).ary[2] = 0;		\
					(mask).ary[3] = 0;		\
					} while(0)

#define CPUMASK_ASSALLONES(mask)	do {				\
					(mask).ary[0] = (__uint64_t)-1;	\
					(mask).ary[1] = (__uint64_t)-1;	\
					(mask).ary[2] = (__uint64_t)-1;	\
					(mask).ary[3] = (__uint64_t)-1;	\
					} while(0)

#define CPUMASK_ASSBIT(mask, i)		do {				\
						CPUMASK_ASSZERO(mask);	\
						CPUMASK_ORBIT(mask, i); \
					} while(0)

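/*
 * CPUMASK_ASSBMASK(mask, i) assigns a mask covering cpus [0, i); its
 * counterpart CPUMASK_ASSNBMASK() assigns the complement, cpus i and up.
 * Both assume 0 <= i <= 255, since CPUMASK_SIMPLE() may not shift by 64.
 */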
#define CPUMASK_ASSBMASK(mask, i)	do {				\
		if ((i) < 64) {						\
			(mask).ary[0] = CPUMASK_SIMPLE(i) - 1;		\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = 0;				\
		} else if ((i) < 128) {					\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = CPUMASK_SIMPLE((i) - 64) - 1;	\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = 0;				\
		} else if ((i) < 192) {					\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = CPUMASK_SIMPLE((i) - 128) - 1;	\
			(mask).ary[3] = 0;				\
		} else {						\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = CPUMASK_SIMPLE((i) - 192) - 1;	\
		}							\
					} while(0)

#define CPUMASK_ASSNBMASK(mask, i)	do {				\
		if ((i) < 64) {						\
			(mask).ary[0] = ~(CPUMASK_SIMPLE(i) - 1);	\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else if ((i) < 128) {					\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = ~(CPUMASK_SIMPLE((i) - 64) - 1);\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else if ((i) < 192) {					\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = ~(CPUMASK_SIMPLE((i) - 128) - 1);\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else {						\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = ~(CPUMASK_SIMPLE((i) - 192) - 1);\
		}							\
					} while(0)

#define CPUMASK_ANDMASK(mask, val)	do {				\
					(mask).ary[0] &= (val).ary[0];	\
					(mask).ary[1] &= (val).ary[1];	\
					(mask).ary[2] &= (val).ary[2];	\
					(mask).ary[3] &= (val).ary[3];	\
					} while(0)

#define CPUMASK_NANDMASK(mask, val)	do {				\
					(mask).ary[0] &= ~(val).ary[0];	\
					(mask).ary[1] &= ~(val).ary[1];	\
					(mask).ary[2] &= ~(val).ary[2];	\
					(mask).ary[3] &= ~(val).ary[3];	\
					} while(0)

#define CPUMASK_ORMASK(mask, val)	do {				\
					(mask).ary[0] |= (val).ary[0];	\
					(mask).ary[1] |= (val).ary[1];	\
					(mask).ary[2] |= (val).ary[2];	\
					(mask).ary[3] |= (val).ary[3];	\
					} while(0)

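/*
 * The ATOMIC_CPUMASK_*() forms update the mask with the per-word
 * atomic_set_cpumask()/atomic_clear_cpumask() primitives.  Each 64-bit
 * word is updated atomically, but a multi-word ORMASK/NANDMASK is not
 * one atomic operation over the entire 256-bit mask.
 */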
#define ATOMIC_CPUMASK_ORBIT(mask, i)					  \
			atomic_set_cpumask(&(mask).ary[((i) >> 6) & 3],	  \
					   CPUMASK_SIMPLE((i) & 63))

#define ATOMIC_CPUMASK_NANDBIT(mask, i)					  \
			atomic_clear_cpumask(&(mask).ary[((i) >> 6) & 3], \
					   CPUMASK_SIMPLE((i) & 63))

#define ATOMIC_CPUMASK_ORMASK(mask, val) do {				  \
			atomic_set_cpumask(&(mask).ary[0], (val).ary[0]); \
			atomic_set_cpumask(&(mask).ary[1], (val).ary[1]); \
			atomic_set_cpumask(&(mask).ary[2], (val).ary[2]); \
			atomic_set_cpumask(&(mask).ary[3], (val).ary[3]); \
					 } while(0)

#define ATOMIC_CPUMASK_NANDMASK(mask, val) do {				    \
			atomic_clear_cpumask(&(mask).ary[0], (val).ary[0]); \
			atomic_clear_cpumask(&(mask).ary[1], (val).ary[1]); \
			atomic_clear_cpumask(&(mask).ary[2], (val).ary[2]); \
			atomic_clear_cpumask(&(mask).ary[3], (val).ary[3]); \
					 } while(0)

#endif	/* _KERNEL || _KERNEL_STRUCTURES */

#define CPULOCK_EXCLBIT	0		/* exclusive lock bit number */
#define CPULOCK_EXCL	0x00000001	/* exclusive lock */
#define CPULOCK_INCR	0x00000002	/* auxiliary counter add/sub */
#define CPULOCK_CNTMASK	0x7FFFFFFE
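
/*
 * cpulock_t layout implied by the values above: bit 0 (CPULOCK_EXCL)
 * carries the exclusive lock, and bits 1-30 (CPULOCK_CNTMASK) hold a
 * counter advanced in units of CPULOCK_INCR.
 */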

#define PML4SIZE	sizeof(pml4_entry_t)	/* for assembly files */
#define PDPSIZE		sizeof(pdp_entry_t)	/* for assembly files */
#define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#endif /* !_CPU_TYPES_H_ */