/*
 * Copyright (c) 2008-2016 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _CPU_CPUMASK_H_
#define	_CPU_CPUMASK_H_

#include <cpu/types.h>
#ifdef _KERNEL
#include <cpu/atomic.h>
#endif

#if _CPUMASK_ELEMENTS != 4
#error "CPUMASK macros incompatible with cpumask_t"
#endif

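/*
 * cpumask_t is an array of CPUMASK_ELEMENTS (4) 64-bit words, one bit per
 * cpu, covering up to 256 cpus.  For cpu N the word index is (N >> 6) and
 * the bit within that word is (N & 63).
 */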
#define CPUMASK_ELEMENTS	_CPUMASK_ELEMENTS

#define CPUMASK_INITIALIZER_ALLONES	{ .ary = { (__uint64_t)-1, \
					  (__uint64_t)-1, \
					  (__uint64_t)-1, \
					  (__uint64_t)-1 } }
#define CPUMASK_INITIALIZER_ONLYONE	{ .ary = { 1, 0, 0, 0 } }

#define CPUMASK_SIMPLE(cpu)	((__uint64_t)1 << (cpu))

#define CPUMASK_ADDR(mask, cpu)	(&(mask).ary[((cpu) >> 6) & 3])

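/*
 * BSRCPUMASK() returns the highest set cpu number and BSFCPUMASK() the
 * lowest.  The result is undefined for an all-zero mask because
 * bsrq()/bsfq() are undefined for a zero operand.
 */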
#define BSRCPUMASK(val)		((val).ary[3] ? 192 + bsrq((val).ary[3]) : \
				((val).ary[2] ? 128 + bsrq((val).ary[2]) : \
				((val).ary[1] ? 64 + bsrq((val).ary[1]) : \
						bsrq((val).ary[0]))))

#define BSFCPUMASK(val)		((val).ary[0] ? bsfq((val).ary[0]) : \
				((val).ary[1] ? 64 + bsfq((val).ary[1]) : \
				((val).ary[2] ? 128 + bsfq((val).ary[2]) : \
						192 + bsfq((val).ary[3]))))

#define CPUMASK_CMPMASKEQ(val1, val2)	((val1).ary[0] == (val2).ary[0] && \
					 (val1).ary[1] == (val2).ary[1] && \
					 (val1).ary[2] == (val2).ary[2] && \
					 (val1).ary[3] == (val2).ary[3])

#define CPUMASK_CMPMASKNEQ(val1, val2)	((val1).ary[0] != (val2).ary[0] || \
					 (val1).ary[1] != (val2).ary[1] || \
					 (val1).ary[2] != (val2).ary[2] || \
					 (val1).ary[3] != (val2).ary[3])

#define CPUMASK_ISUP(val)		((val).ary[0] == 1 && \
					 (val).ary[1] == 0 && \
					 (val).ary[2] == 0 && \
					 (val).ary[3] == 0)

#define CPUMASK_TESTZERO(val)		((val).ary[0] == 0 && \
					 (val).ary[1] == 0 && \
					 (val).ary[2] == 0 && \
					 (val).ary[3] == 0)

#define CPUMASK_TESTNZERO(val)		((val).ary[0] != 0 || \
					 (val).ary[1] != 0 || \
					 (val).ary[2] != 0 || \
					 (val).ary[3] != 0)

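/*
 * CPUMASK_TESTBIT() yields the selected bit in place, i.e. a non-zero
 * value (not necessarily 1) when cpu i's bit is set.  CPUMASK_TESTMASK()
 * is true when the two masks share any set bit.
 */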
#define CPUMASK_TESTBIT(val, i)		((val).ary[((i) >> 6) & 3] & \
					 CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_TESTMASK(val1, val2)	(((val1).ary[0] & (val2).ary[0]) || \
					 ((val1).ary[1] & (val2).ary[1]) || \
					 ((val1).ary[2] & (val2).ary[2]) || \
					 ((val1).ary[3] & (val2).ary[3]))

#define CPUMASK_LOWMASK(val)		((val).ary[0])

#define CPUMASK_ORBIT(mask, i)		((mask).ary[((i) >> 6) & 3] |= \
					 CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_ANDBIT(mask, i)		((mask).ary[((i) >> 6) & 3] &= \
					 CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_NANDBIT(mask, i)	((mask).ary[((i) >> 6) & 3] &= \
					 ~CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_ASSZERO(mask)		do {				\
					(mask).ary[0] = 0;		\
					(mask).ary[1] = 0;		\
					(mask).ary[2] = 0;		\
					(mask).ary[3] = 0;		\
					} while(0)

#define CPUMASK_ASSALLONES(mask)	do {				\
					(mask).ary[0] = (__uint64_t)-1;	\
					(mask).ary[1] = (__uint64_t)-1;	\
					(mask).ary[2] = (__uint64_t)-1;	\
					(mask).ary[3] = (__uint64_t)-1;	\
					} while(0)

#define CPUMASK_ASSBIT(mask, i)		do {				\
						CPUMASK_ASSZERO(mask);	\
						CPUMASK_ORBIT(mask, i); \
					} while(0)

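/*
 * CPUMASK_ASSBMASK() sets the mask to cover cpus 0..(i-1);
 * CPUMASK_ASSNBMASK() sets it to cover cpus i..255.
 */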
#define CPUMASK_ASSBMASK(mask, i)	do {				\
		if ((i) < 64) {						\
			(mask).ary[0] = CPUMASK_SIMPLE(i) - 1;		\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = 0;				\
		} else if ((i) < 128) {					\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = CPUMASK_SIMPLE((i) - 64) - 1;	\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = 0;				\
		} else if ((i) < 192) {					\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = CPUMASK_SIMPLE((i) - 128) - 1;	\
			(mask).ary[3] = 0;				\
		} else {						\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = CPUMASK_SIMPLE((i) - 192) - 1;	\
		}							\
					} while(0)

#define CPUMASK_ASSNBMASK(mask, i)	do {				\
		if ((i) < 64) {						\
			(mask).ary[0] = ~(CPUMASK_SIMPLE(i) - 1);	\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else if ((i) < 128) {					\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = ~(CPUMASK_SIMPLE((i) - 64) - 1);\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else if ((i) < 192) {					\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = ~(CPUMASK_SIMPLE((i) - 128) - 1);\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else {						\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = ~(CPUMASK_SIMPLE((i) - 192) - 1);\
		}							\
					} while(0)

#define CPUMASK_ANDMASK(mask, val)	do {				\
					(mask).ary[0] &= (val).ary[0];	\
					(mask).ary[1] &= (val).ary[1];	\
					(mask).ary[2] &= (val).ary[2];	\
					(mask).ary[3] &= (val).ary[3];	\
					} while(0)

#define CPUMASK_NANDMASK(mask, val)	do {				\
					(mask).ary[0] &= ~(val).ary[0];	\
					(mask).ary[1] &= ~(val).ary[1];	\
					(mask).ary[2] &= ~(val).ary[2];	\
					(mask).ary[3] &= ~(val).ary[3];	\
					} while(0)

#define CPUMASK_ORMASK(mask, val)	do {				\
					(mask).ary[0] |= (val).ary[0];	\
					(mask).ary[1] |= (val).ary[1];	\
					(mask).ary[2] |= (val).ary[2];	\
					(mask).ary[3] |= (val).ary[3];	\
					} while(0)

#define CPUMASK_XORMASK(mask, val)	do {				\
					(mask).ary[0] ^= (val).ary[0];	\
					(mask).ary[1] ^= (val).ary[1];	\
					(mask).ary[2] ^= (val).ary[2];	\
					(mask).ary[3] ^= (val).ary[3];	\
					} while(0)

#define CPUMASK_INVMASK(mask)		do {				\
					(mask).ary[0] ^= -1L;		\
					(mask).ary[1] ^= -1L;		\
					(mask).ary[2] ^= -1L;		\
					(mask).ary[3] ^= -1L;		\
					} while(0)

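/*
 * Example usage (illustrative sketch only; ncpus is the kernel's cpu
 * count and do_work_on_cpu() is a hypothetical helper): walk a mask by
 * repeatedly taking the lowest set bit.
 *
 *	cpumask_t work_mask;
 *	int cpu;
 *
 *	CPUMASK_ASSBMASK(work_mask, ncpus);
 *	while (CPUMASK_TESTNZERO(work_mask)) {
 *		cpu = BSFCPUMASK(work_mask);
 *		CPUMASK_NANDBIT(work_mask, cpu);
 *		do_work_on_cpu(cpu);
 *	}
 */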
#ifdef _KERNEL
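/*
 * Locked (SMP-safe) variants.  Each operation is atomic only with respect
 * to the individual 64-bit element it touches; the multi-element
 * ORMASK/NANDMASK/COPY forms update the four elements one at a time.
 */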
#define ATOMIC_CPUMASK_ORBIT(mask, i)					  \
			atomic_set_cpumask(&(mask).ary[((i) >> 6) & 3],	  \
					   CPUMASK_SIMPLE((i) & 63))

#define ATOMIC_CPUMASK_NANDBIT(mask, i)					  \
			atomic_clear_cpumask(&(mask).ary[((i) >> 6) & 3], \
					   CPUMASK_SIMPLE((i) & 63))

#define ATOMIC_CPUMASK_TESTANDSET(mask, i)				  \
		atomic_testandset_long(&(mask).ary[((i) >> 6) & 3], (i))

#define ATOMIC_CPUMASK_TESTANDCLR(mask, i)				  \
		atomic_testandclear_long(&(mask).ary[((i) >> 6) & 3], (i))

#define ATOMIC_CPUMASK_ORMASK(mask, val) do {				  \
			atomic_set_cpumask(&(mask).ary[0], (val).ary[0]); \
			atomic_set_cpumask(&(mask).ary[1], (val).ary[1]); \
			atomic_set_cpumask(&(mask).ary[2], (val).ary[2]); \
			atomic_set_cpumask(&(mask).ary[3], (val).ary[3]); \
					 } while(0)

#define ATOMIC_CPUMASK_NANDMASK(mask, val) do {				    \
			atomic_clear_cpumask(&(mask).ary[0], (val).ary[0]); \
			atomic_clear_cpumask(&(mask).ary[1], (val).ary[1]); \
			atomic_clear_cpumask(&(mask).ary[2], (val).ary[2]); \
			atomic_clear_cpumask(&(mask).ary[3], (val).ary[3]); \
					 } while(0)

#define ATOMIC_CPUMASK_COPY(mask, val) do {				    \
			atomic_store_rel_cpumask(&(mask).ary[0], (val).ary[0]);\
			atomic_store_rel_cpumask(&(mask).ary[1], (val).ary[1]);\
			atomic_store_rel_cpumask(&(mask).ary[2], (val).ary[2]);\
			atomic_store_rel_cpumask(&(mask).ary[3], (val).ary[3]);\
					 } while(0)
#endif

#endif /* !_CPU_CPUMASK_H_ */