/*-
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2008 The DragonFly Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)types.h	8.3 (Berkeley) 1/5/94
 * $FreeBSD: src/sys/i386/include/types.h,v 1.19.2.1 2001/03/21 10:50:58 peter Exp $
 */

#ifndef _CPU_TYPES_H_
#define	_CPU_TYPES_H_

#include <machine/stdint.h>

#if defined(__x86_64__)
typedef	__int64_t	__segsz_t;	/* segment size */
typedef	__int64_t	register_t;
typedef	__uint64_t	u_register_t;
#elif defined(__i386__)
typedef	__int32_t	__segsz_t;	/* segment size */
typedef	__int32_t	register_t;
typedef	__uint32_t	u_register_t;
#endif

typedef unsigned long	vm_offset_t;	/* address space bounded offset */
typedef unsigned long	vm_size_t;	/* address space bounded size */

typedef __uint64_t	vm_pindex_t;	/* physical page index */
typedef	__int64_t	vm_ooffset_t;	/* VM object bounded offset */
typedef __uint64_t	vm_poff_t;	/* physical offset */
typedef __uint64_t	vm_paddr_t;	/* physical addr (same as vm_poff_t) */

#ifdef _KERNEL
typedef	__int64_t	intfptr_t;
typedef	__uint64_t	uintfptr_t;
#endif

/*
 * MMU page tables
 */
typedef __uint64_t	pml4_entry_t;
typedef __uint64_t	pdp_entry_t;
typedef __uint64_t	pd_entry_t;
typedef __uint64_t	pt_entry_t;
typedef __uint32_t	cpulock_t;	/* count and exclusive lock */

/*
 * cpumask_t - a mask representing a set of cpus, and supporting routines.
 *
 * WARNING! It is recommended that this mask NOT be made variably-sized
 *	    because it affects a huge number of system structures.  However,
 *	    kernel code (non-module) can be optimized to not operate on the
 *	    whole mask.
 */

#define CPUMASK_ELEMENTS	4	/* tested by assembly for #error */

typedef struct {
	__uint64_t	ary[4];
} cpumask_t;
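
/*
 * Illustrative sketch (added commentary, not part of the original
 * header): four 64-bit elements cover 256 cpus.  Cpu N maps to bit
 * (N & 63) of ary[N >> 6], so cpu 70 lives in ary[1] at bit 6:
 *
 *	cpumask_t mask;
 *	CPUMASK_ASSZERO(mask);
 *	CPUMASK_ORBIT(mask, 70);	// mask.ary[1] == 0x0000000000000040
 */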

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

#define CPUMASK_INITIALIZER_ALLONES	{ .ary = { (__uint64_t)-1, \
					  (__uint64_t)-1, \
					  (__uint64_t)-1, \
					  (__uint64_t)-1 } }
#define CPUMASK_INITIALIZER_ONLYONE	{ .ary = { 1, 0, 0, 0 } }

#define CPUMASK_SIMPLE(cpu)	((__uint64_t)1 << (cpu))

#define BSRCPUMASK(val)		((val).ary[3] ? 192 + bsrq((val).ary[3]) : \
				((val).ary[2] ? 128 + bsrq((val).ary[2]) : \
				((val).ary[1] ? 64 + bsrq((val).ary[1]) : \
						bsrq((val).ary[0]))))

#define BSFCPUMASK(val)		((val).ary[0] ? bsfq((val).ary[0]) : \
				((val).ary[1] ? 64 + bsfq((val).ary[1]) : \
				((val).ary[2] ? 128 + bsfq((val).ary[2]) : \
						192 + bsfq((val).ary[3]))))
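
/*
 * Usage sketch (added, hedged): bsfq()/bsrq() are assumed to be the
 * x86_64 bit-scan inlines from <machine/cpufunc.h>.  BSFCPUMASK()
 * returns the lowest set cpu number and BSRCPUMASK() the highest;
 * both are undefined on an all-zero mask, so test first:
 *
 *	while (CPUMASK_TESTNZERO(mask)) {
 *		int cpu = BSFCPUMASK(mask);
 *		CPUMASK_NANDBIT(mask, cpu);
 *		// ... operate on cpu ...
 *	}
 */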

#define CPUMASK_CMPMASKEQ(val1, val2)	((val1).ary[0] == (val2).ary[0] && \
					 (val1).ary[1] == (val2).ary[1] && \
					 (val1).ary[2] == (val2).ary[2] && \
					 (val1).ary[3] == (val2).ary[3])

#define CPUMASK_CMPMASKNEQ(val1, val2)	((val1).ary[0] != (val2).ary[0] || \
					 (val1).ary[1] != (val2).ary[1] || \
					 (val1).ary[2] != (val2).ary[2] || \
					 (val1).ary[3] != (val2).ary[3])

#define CPUMASK_ISUP(val)		((val).ary[0] == 1 && \
					 (val).ary[1] == 0 && \
					 (val).ary[2] == 0 && \
					 (val).ary[3] == 0)

#define CPUMASK_TESTZERO(val)		((val).ary[0] == 0 && \
					 (val).ary[1] == 0 && \
					 (val).ary[2] == 0 && \
					 (val).ary[3] == 0)

#define CPUMASK_TESTNZERO(val)		((val).ary[0] != 0 || \
					 (val).ary[1] != 0 || \
					 (val).ary[2] != 0 || \
					 (val).ary[3] != 0)

#define CPUMASK_TESTBIT(val, i)		((val).ary[((i) >> 6) & 3] & \
					 CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_TESTMASK(val1, val2)	(((val1).ary[0] & (val2).ary[0]) || \
					 ((val1).ary[1] & (val2).ary[1]) || \
					 ((val1).ary[2] & (val2).ary[2]) || \
					 ((val1).ary[3] & (val2).ary[3]))

#define CPUMASK_LOWMASK(val)		((val).ary[0])

#define CPUMASK_ORBIT(mask, i)		((mask).ary[((i) >> 6) & 3] |= \
					 CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_ANDBIT(mask, i)		((mask).ary[((i) >> 6) & 3] &= \
					 CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_NANDBIT(mask, i)	((mask).ary[((i) >> 6) & 3] &= \
					 ~CPUMASK_SIMPLE((i) & 63))

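/*
 * Example (added commentary): the single-bit forms operate on one
 * cpu's bit only; CPUMASK_ORBIT() sets it, CPUMASK_NANDBIT() clears
 * it, and CPUMASK_ANDBIT() discards every other bit:
 *
 *	cpumask_t m;
 *	CPUMASK_ASSZERO(m);
 *	CPUMASK_ORBIT(m, 5);		// m now contains exactly cpu 5
 *	if (CPUMASK_TESTBIT(m, 5))	// non-zero: the bit is set
 *		CPUMASK_NANDBIT(m, 5);	// m is empty again
 */
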
#define CPUMASK_ASSZERO(mask)		do {				\
					(mask).ary[0] = 0;		\
					(mask).ary[1] = 0;		\
					(mask).ary[2] = 0;		\
					(mask).ary[3] = 0;		\
					} while(0)

#define CPUMASK_ASSALLONES(mask)	do {				\
					(mask).ary[0] = (__uint64_t)-1;	\
					(mask).ary[1] = (__uint64_t)-1;	\
					(mask).ary[2] = (__uint64_t)-1;	\
					(mask).ary[3] = (__uint64_t)-1;	\
					} while(0)

#define CPUMASK_ASSBIT(mask, i)		do {				\
						CPUMASK_ASSZERO(mask);	\
						CPUMASK_ORBIT(mask, i); \
					} while(0)

#define CPUMASK_ASSBMASK(mask, i)	do {				\
		if ((i) < 64) {						\
			(mask).ary[0] = CPUMASK_SIMPLE(i) - 1;		\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = 0;				\
		} else if ((i) < 128) {					\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = CPUMASK_SIMPLE((i) - 64) - 1;	\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = 0;				\
		} else if ((i) < 192) {					\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = CPUMASK_SIMPLE((i) - 128) - 1;	\
			(mask).ary[3] = 0;				\
		} else {						\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = CPUMASK_SIMPLE((i) - 192) - 1;	\
		}							\
					} while(0)

#define CPUMASK_ASSNBMASK(mask, i)	do {				\
		if ((i) < 64) {						\
			(mask).ary[0] = ~(CPUMASK_SIMPLE(i) - 1);	\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else if ((i) < 128) {					\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = ~(CPUMASK_SIMPLE((i) - 64) - 1);\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else if ((i) < 192) {					\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = ~(CPUMASK_SIMPLE((i) - 128) - 1);\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else {						\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = ~(CPUMASK_SIMPLE((i) - 192) - 1);\
		}							\
					} while(0)

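/*
 * Worked example (added commentary): CPUMASK_ASSBMASK(m, i) builds a
 * mask of cpus 0..i-1 and CPUMASK_ASSNBMASK(m, i) its complement,
 * cpus i and up.  With i = 70, ASSBMASK yields ary[0] = ~0 and
 * ary[1] = 0x3f (bits 0-5, i.e. cpus 64-69), the rest zero;
 * ASSNBMASK yields the element-wise complement of that result.
 */
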
#define CPUMASK_ANDMASK(mask, val)	do {				\
					(mask).ary[0] &= (val).ary[0];	\
					(mask).ary[1] &= (val).ary[1];	\
					(mask).ary[2] &= (val).ary[2];	\
					(mask).ary[3] &= (val).ary[3];	\
					} while(0)

#define CPUMASK_NANDMASK(mask, val)	do {				\
					(mask).ary[0] &= ~(val).ary[0];	\
					(mask).ary[1] &= ~(val).ary[1];	\
					(mask).ary[2] &= ~(val).ary[2];	\
					(mask).ary[3] &= ~(val).ary[3];	\
					} while(0)

#define CPUMASK_ORMASK(mask, val)	do {				\
					(mask).ary[0] |= (val).ary[0];	\
					(mask).ary[1] |= (val).ary[1];	\
					(mask).ary[2] |= (val).ary[2];	\
					(mask).ary[3] |= (val).ary[3];	\
					} while(0)

#define CPUMASK_XORMASK(mask, val)	do {				\
					(mask).ary[0] ^= (val).ary[0];	\
					(mask).ary[1] ^= (val).ary[1];	\
					(mask).ary[2] ^= (val).ary[2];	\
					(mask).ary[3] ^= (val).ary[3];	\
					} while(0)

#define ATOMIC_CPUMASK_ORBIT(mask, i)					  \
			atomic_set_cpumask(&(mask).ary[((i) >> 6) & 3],	  \
					   CPUMASK_SIMPLE((i) & 63))

#define ATOMIC_CPUMASK_NANDBIT(mask, i)					  \
			atomic_clear_cpumask(&(mask).ary[((i) >> 6) & 3], \
					     CPUMASK_SIMPLE((i) & 63))

#define ATOMIC_CPUMASK_ORMASK(mask, val) do {				  \
			atomic_set_cpumask(&(mask).ary[0], (val).ary[0]); \
			atomic_set_cpumask(&(mask).ary[1], (val).ary[1]); \
			atomic_set_cpumask(&(mask).ary[2], (val).ary[2]); \
			atomic_set_cpumask(&(mask).ary[3], (val).ary[3]); \
					 } while(0)

#define ATOMIC_CPUMASK_NANDMASK(mask, val) do {				    \
			atomic_clear_cpumask(&(mask).ary[0], (val).ary[0]); \
			atomic_clear_cpumask(&(mask).ary[1], (val).ary[1]); \
			atomic_clear_cpumask(&(mask).ary[2], (val).ary[2]); \
			atomic_clear_cpumask(&(mask).ary[3], (val).ary[3]); \
					 } while(0)

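/*
 * Note (added, hedged): atomic_set_cpumask()/atomic_clear_cpumask()
 * are assumed to be the locked OR / AND-NOT primitives from
 * <machine/atomic.h>.  The ATOMIC_CPUMASK_*MASK forms above are
 * therefore atomic per 64-bit element, not across the whole array.
 */
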
#endif

#define CPULOCK_EXCLBIT	0		/* exclusive lock bit number */
#define CPULOCK_EXCL	0x00000001	/* exclusive lock */
#define CPULOCK_INCR	0x00000002	/* auxiliary counter add/sub */
#define CPULOCK_CNTMASK	0x7FFFFFFE

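/*
 * Layout sketch (inferred from the constants above): bit 0 of a
 * cpulock_t is the exclusive-lock bit and bits 1-30 hold the
 * auxiliary count in units of CPULOCK_INCR, so a count can be
 * recovered with:
 *
 *	count = (lock & CPULOCK_CNTMASK) / CPULOCK_INCR;
 */
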
#define PML4SIZE	sizeof(pml4_entry_t)	/* for assembly files */
#define PDPSIZE		sizeof(pdp_entry_t)	/* for assembly files */
#define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#endif /* !_CPU_TYPES_H_ */