/*
 * Copyright (c) 2008-2016 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _CPU_CPUMASK_H_
#define	_CPU_CPUMASK_H_

#include <cpu/types.h>
#include <cpu/atomic.h>

#if _CPUMASK_ELEMENTS != 4
#error "CPUMASK macros incompatible with cpumask_t"
#endif
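
/*
 * cpumask_t is an array ("ary") of CPUMASK_ELEMENTS (4) 64-bit words,
 * giving room for up to 4 * 64 = 256 cpus.  The macros below operate
 * element-by-element; none of the multi-word operations are atomic as
 * a whole (see the ATOMIC_CPUMASK_* section further down for
 * per-element locked variants).
 */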

#define CPUMASK_ELEMENTS	_CPUMASK_ELEMENTS

#define CPUMASK_INITIALIZER_ALLONES	{ .ary = { (__uint64_t)-1, \
					  (__uint64_t)-1, \
					  (__uint64_t)-1, \
					  (__uint64_t)-1 } }
#define CPUMASK_INITIALIZER_ONLYONE	{ .ary = { 1, 0, 0, 0 } }

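/*
 * CPUMASK_SIMPLE() produces a single-bit mask within one 64-bit word
 * and is only meaningful for cpu ids 0-63.  For the full mask, bit
 * selection is split: ((cpu) >> 6) & 3 picks the 64-bit element and
 * (cpu) & 63 the bit within it.  For example, cpu 70 lives in ary[1],
 * bit 6.
 */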
#define CPUMASK_SIMPLE(cpu)	((__uint64_t)1 << (cpu))

#define CPUMASK_ADDR(mask, cpu)	(&(mask).ary[((cpu) >> 6) & 3])

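/*
 * BSRCPUMASK() returns the highest set cpu id in the mask,
 * BSFCPUMASK() the lowest.  Like the underlying bsrq()/bsfq()
 * (x86 bsr/bsf), the result is undefined when the mask is all zeroes,
 * so callers should check CPUMASK_TESTNZERO() first.
 */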
#define BSRCPUMASK(val)		((val).ary[3] ? 192 + bsrq((val).ary[3]) : \
				((val).ary[2] ? 128 + bsrq((val).ary[2]) : \
				((val).ary[1] ? 64 + bsrq((val).ary[1]) : \
						bsrq((val).ary[0]))))

#define BSFCPUMASK(val)		((val).ary[0] ? bsfq((val).ary[0]) : \
				((val).ary[1] ? 64 + bsfq((val).ary[1]) : \
				((val).ary[2] ? 128 + bsfq((val).ary[2]) : \
						192 + bsfq((val).ary[3]))))

#define CPUMASK_CMPMASKEQ(val1, val2)	((val1).ary[0] == (val2).ary[0] && \
					 (val1).ary[1] == (val2).ary[1] && \
					 (val1).ary[2] == (val2).ary[2] && \
					 (val1).ary[3] == (val2).ary[3])

#define CPUMASK_CMPMASKNEQ(val1, val2)	((val1).ary[0] != (val2).ary[0] || \
					 (val1).ary[1] != (val2).ary[1] || \
					 (val1).ary[2] != (val2).ary[2] || \
					 (val1).ary[3] != (val2).ary[3])

#define CPUMASK_ISUP(val)		((val).ary[0] == 1 && \
					 (val).ary[1] == 0 && \
					 (val).ary[2] == 0 && \
					 (val).ary[3] == 0)

#define CPUMASK_TESTZERO(val)		((val).ary[0] == 0 && \
					 (val).ary[1] == 0 && \
					 (val).ary[2] == 0 && \
					 (val).ary[3] == 0)

#define CPUMASK_TESTNZERO(val)		((val).ary[0] != 0 || \
					 (val).ary[1] != 0 || \
					 (val).ary[2] != 0 || \
					 (val).ary[3] != 0)

#define CPUMASK_TESTBIT(val, i)		((val).ary[((i) >> 6) & 3] & \
					 CPUMASK_SIMPLE((i) & 63))

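/*
 * Returns non-zero if the two masks intersect (share at least one set
 * bit); compare with CPUMASK_CMPMASKEQ(), which tests full equality.
 */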
#define CPUMASK_TESTMASK(val1, val2)	(((val1).ary[0] & (val2).ary[0]) || \
					 ((val1).ary[1] & (val2).ary[1]) || \
					 ((val1).ary[2] & (val2).ary[2]) || \
					 ((val1).ary[3] & (val2).ary[3]))

#define CPUMASK_LOWMASK(val)		((val).ary[0])

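/*
 * Per-bit operators.  CPUMASK_ORBIT() sets bit i and CPUMASK_NANDBIT()
 * clears it.  Note that CPUMASK_ANDBIT() ANDs the mask against a
 * single-bit mask, i.e. it clears every bit *except* (possibly) bit i.
 */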
#define CPUMASK_ORBIT(mask, i)		((mask).ary[((i) >> 6) & 3] |= \
					 CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_ANDBIT(mask, i)		((mask).ary[((i) >> 6) & 3] &= \
					 CPUMASK_SIMPLE((i) & 63))

#define CPUMASK_NANDBIT(mask, i)	((mask).ary[((i) >> 6) & 3] &= \
					 ~CPUMASK_SIMPLE((i) & 63))
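
/*
 * Illustrative sketch (not part of the original header): iterate the
 * cpus in a mask by repeatedly taking the lowest set bit.  'scan' is a
 * hypothetical local copy; BSFCPUMASK() must not be run on an empty
 * mask, hence the CPUMASK_TESTNZERO() guard.
 *
 *	cpumask_t scan = some_mask;
 *	while (CPUMASK_TESTNZERO(scan)) {
 *		int cpu = BSFCPUMASK(scan);
 *		CPUMASK_NANDBIT(scan, cpu);
 *		... operate on 'cpu' ...
 *	}
 */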

#define CPUMASK_ASSZERO(mask)		do {				\
					(mask).ary[0] = 0;		\
					(mask).ary[1] = 0;		\
					(mask).ary[2] = 0;		\
					(mask).ary[3] = 0;		\
					} while(0)

#define CPUMASK_ASSALLONES(mask)	do {				\
					(mask).ary[0] = (__uint64_t)-1;	\
					(mask).ary[1] = (__uint64_t)-1;	\
					(mask).ary[2] = (__uint64_t)-1;	\
					(mask).ary[3] = (__uint64_t)-1;	\
					} while(0)

#define CPUMASK_ASSBIT(mask, i)		do {				\
						CPUMASK_ASSZERO(mask);	\
						CPUMASK_ORBIT(mask, i); \
					} while(0)

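/*
 * CPUMASK_ASSBMASK() assigns a mask covering cpus 0..i-1 (the low i
 * bits); CPUMASK_ASSNBMASK() assigns the complement, cpus i..255.
 * E.g. for i = 70, ASSBMASK yields ary[0] = ~0 and ary[1] = 0x3f.
 */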
#define CPUMASK_ASSBMASK(mask, i)	do {				\
		if ((i) < 64) {						\
			(mask).ary[0] = CPUMASK_SIMPLE(i) - 1;		\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = 0;				\
		} else if ((i) < 128) {					\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = CPUMASK_SIMPLE((i) - 64) - 1;	\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = 0;				\
		} else if ((i) < 192) {					\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = CPUMASK_SIMPLE((i) - 128) - 1;	\
			(mask).ary[3] = 0;				\
		} else {						\
			(mask).ary[0] = (__uint64_t)-1;			\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = CPUMASK_SIMPLE((i) - 192) - 1;	\
		}							\
					} while(0)

#define CPUMASK_ASSNBMASK(mask, i)	do {				\
		if ((i) < 64) {						\
			(mask).ary[0] = ~(CPUMASK_SIMPLE(i) - 1);	\
			(mask).ary[1] = (__uint64_t)-1;			\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else if ((i) < 128) {					\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = ~(CPUMASK_SIMPLE((i) - 64) - 1);\
			(mask).ary[2] = (__uint64_t)-1;			\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else if ((i) < 192) {					\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = ~(CPUMASK_SIMPLE((i) - 128) - 1);\
			(mask).ary[3] = (__uint64_t)-1;			\
		} else {						\
			(mask).ary[0] = 0;				\
			(mask).ary[1] = 0;				\
			(mask).ary[2] = 0;				\
			(mask).ary[3] = ~(CPUMASK_SIMPLE((i) - 192) - 1);\
		}							\
					} while(0)

#define CPUMASK_ANDMASK(mask, val)	do {				\
					(mask).ary[0] &= (val).ary[0];	\
					(mask).ary[1] &= (val).ary[1];	\
					(mask).ary[2] &= (val).ary[2];	\
					(mask).ary[3] &= (val).ary[3];	\
					} while(0)

#define CPUMASK_NANDMASK(mask, val)	do {				\
					(mask).ary[0] &= ~(val).ary[0];	\
					(mask).ary[1] &= ~(val).ary[1];	\
					(mask).ary[2] &= ~(val).ary[2];	\
					(mask).ary[3] &= ~(val).ary[3];	\
					} while(0)

#define CPUMASK_ORMASK(mask, val)	do {				\
					(mask).ary[0] |= (val).ary[0];	\
					(mask).ary[1] |= (val).ary[1];	\
					(mask).ary[2] |= (val).ary[2];	\
					(mask).ary[3] |= (val).ary[3];	\
					} while(0)

#define CPUMASK_XORMASK(mask, val)	do {				\
					(mask).ary[0] ^= (val).ary[0];	\
					(mask).ary[1] ^= (val).ary[1];	\
					(mask).ary[2] ^= (val).ary[2];	\
					(mask).ary[3] ^= (val).ary[3];	\
					} while(0)

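/*
 * Atomic variants.  Each helper performs a locked read-modify-write
 * (or, for COPY, a release store) on a single 64-bit element at a
 * time; operations spanning multiple elements are not atomic as a
 * whole.
 */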
#define ATOMIC_CPUMASK_ORBIT(mask, i)					  \
			atomic_set_cpumask(&(mask).ary[((i) >> 6) & 3],	  \
					   CPUMASK_SIMPLE((i) & 63))

#define ATOMIC_CPUMASK_NANDBIT(mask, i)					  \
			atomic_clear_cpumask(&(mask).ary[((i) >> 6) & 3], \
					   CPUMASK_SIMPLE((i) & 63))

#define ATOMIC_CPUMASK_TESTANDSET(mask, i)				  \
		atomic_testandset_long(&(mask).ary[((i) >> 6) & 3], (i))

#define ATOMIC_CPUMASK_TESTANDCLR(mask, i)				  \
		atomic_testandclear_long(&(mask).ary[((i) >> 6) & 3], (i))
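
/*
 * The TESTANDSET/TESTANDCLR forms return the previous state of the
 * bit, as with the underlying atomic_testandset_long() and
 * atomic_testandclear_long().
 */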

#define ATOMIC_CPUMASK_ORMASK(mask, val) do {				  \
			atomic_set_cpumask(&(mask).ary[0], (val).ary[0]); \
			atomic_set_cpumask(&(mask).ary[1], (val).ary[1]); \
			atomic_set_cpumask(&(mask).ary[2], (val).ary[2]); \
			atomic_set_cpumask(&(mask).ary[3], (val).ary[3]); \
					 } while(0)

#define ATOMIC_CPUMASK_NANDMASK(mask, val) do {				    \
			atomic_clear_cpumask(&(mask).ary[0], (val).ary[0]); \
			atomic_clear_cpumask(&(mask).ary[1], (val).ary[1]); \
			atomic_clear_cpumask(&(mask).ary[2], (val).ary[2]); \
			atomic_clear_cpumask(&(mask).ary[3], (val).ary[3]); \
					 } while(0)

#define ATOMIC_CPUMASK_COPY(mask, val) do {				    \
			atomic_store_rel_cpumask(&(mask).ary[0], (val).ary[0]);\
			atomic_store_rel_cpumask(&(mask).ary[1], (val).ary[1]);\
			atomic_store_rel_cpumask(&(mask).ary[2], (val).ary[2]);\
			atomic_store_rel_cpumask(&(mask).ary[3], (val).ary[3]);\
					 } while(0)
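
/*
 * Note: ATOMIC_CPUMASK_COPY() stores each of the four elements with a
 * release store, but the 256-bit copy itself is not a single atomic
 * operation; a concurrent reader can observe a mix of old and new
 * elements.
 */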

#endif /* !_CPU_CPUMASK_H_ */