/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NODEMASK_H
#define __LINUX_NODEMASK_H

/*
 * Nodemasks provide a bitmap suitable for representing the
 * set of Nodes in a system, one bit position per Node number.
 *
 * See detailed comments in the file linux/bitmap.h describing the
 * data type on which these nodemasks are based.
 *
 * For details of nodemask_parse_user(), see bitmap_parse_user() in
 * lib/bitmap.c.  For details of nodelist_parse(), see bitmap_parselist(),
 * also in bitmap.c.  For details of node_remap(), see bitmap_bitremap in
 * lib/bitmap.c.  For details of nodes_remap(), see bitmap_remap in
 * lib/bitmap.c.  For details of nodes_onto(), see bitmap_onto in
 * lib/bitmap.c.  For details of nodes_fold(), see bitmap_fold in
 * lib/bitmap.c.
 *
 * The available nodemask operations are:
 *
 * void node_set(node, mask)		turn on bit 'node' in mask
 * void node_clear(node, mask)		turn off bit 'node' in mask
 * void nodes_setall(mask)		set all bits
 * void nodes_clear(mask)		clear all bits
 * int node_isset(node, mask)		true iff bit 'node' set in mask
 * int node_test_and_set(node, mask)	test and set bit 'node' in mask
 *
 * void nodes_and(dst, src1, src2)	dst = src1 & src2  [intersection]
 * void nodes_or(dst, src1, src2)	dst = src1 | src2  [union]
 * void nodes_xor(dst, src1, src2)	dst = src1 ^ src2
 * void nodes_andnot(dst, src1, src2)	dst = src1 & ~src2
 * void nodes_complement(dst, src)	dst = ~src
 *
 * int nodes_equal(mask1, mask2)	Does mask1 == mask2?
 * int nodes_intersects(mask1, mask2)	Do mask1 and mask2 intersect?
 * int nodes_subset(mask1, mask2)	Is mask1 a subset of mask2?
 * int nodes_empty(mask)		Is mask empty (no bits set)?
 * int nodes_full(mask)			Is mask full (all bits set)?
 * int nodes_weight(mask)		Hamming weight - number of set bits
 *
 * void nodes_shift_right(dst, src, n)	Shift right
 * void nodes_shift_left(dst, src, n)	Shift left
 *
 * unsigned int first_node(mask)	Number of the lowest set bit, or MAX_NUMNODES
 * unsigned int next_node(node, mask)	Next node past 'node', or MAX_NUMNODES
 * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
 *					or MAX_NUMNODES
 * unsigned int first_unset_node(mask)	First node not set in mask, or
 *					MAX_NUMNODES
 *
 * nodemask_t nodemask_of_node(node)	Return nodemask with bit 'node' set
 * NODE_MASK_ALL			Initializer - all bits set
 * NODE_MASK_NONE			Initializer - no bits set
 * unsigned long *nodes_addr(mask)	Array of unsigned long's in mask
 *
 * int nodemask_parse_user(ubuf, ulen, mask)	Parse ascii string as nodemask
 * int nodelist_parse(buf, map)		Parse ascii string as nodelist
 * int node_remap(oldbit, old, new)	newbit = map(old, new)(oldbit)
 * void nodes_remap(dst, src, old, new)	*dst = map(old, new)(src)
 * void nodes_onto(dst, orig, relmap)	*dst = orig relative to relmap
 * void nodes_fold(dst, orig, sz)	dst bits = orig bits mod sz
 *
 * for_each_node_mask(node, mask)	for-loop node over mask
 *
 * int num_online_nodes()		Number of online Nodes
 * int num_possible_nodes()		Number of all possible Nodes
 *
 * int node_random(mask)		Random node with set bit in mask
 *
 * int node_online(node)		Is node 'node' online?
 * int node_possible(node)		Is node 'node' possible?
 *
 * node_set_online(node)		set bit 'node' in node_online_map
 * node_set_offline(node)		clear bit 'node' in node_online_map
 *
 * for_each_node(node)			for-loop node over node_possible_map
 * for_each_online_node(node)		for-loop node over node_online_map
 *
 * Subtlety:
 * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway)
 *    to generate slightly worse code.  So use a simple one-line #define
 *    for node_isset(), instead of wrapping an inline inside a macro, the
 *    way we do the other calls.
 *
 * NODEMASK_SCRATCH
 * When doing the above logical AND, OR, XOR and Remap operations, callers
 * tend to need temporary nodemask_t's on the stack.  But if NODES_SHIFT is
 * large, nodemask_t's consume too much stack space.  NODEMASK_SCRATCH is a
 * helper for such situations.  See below and CPUMASK_ALLOC also.
 */
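/*
 * Illustrative usage sketch of the basic operations (not part of this
 * header; 'my_nodes' and 'nid' are hypothetical caller-side names):
 *
 *	nodemask_t my_nodes = NODE_MASK_NONE;
 *	int nid;
 *
 *	node_set(0, my_nodes);
 *	node_set(2, my_nodes);
 *	if (node_isset(2, my_nodes))
 *		for_each_node_mask(nid, my_nodes)
 *			pr_info("node %d is in the mask\n", nid);
 */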
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/minmax.h>
#include <linux/nodemask_types.h>
#include <linux/numa.h>
#include <linux/random.h>

extern nodemask_t _unused_nodemask_arg_;

/**
 * nodemask_pr_args - printf args to output a nodemask
 * @maskp: nodemask to be printed
 *
 * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
 */
#define nodemask_pr_args(maskp)	__nodemask_pr_numnodes(maskp), \
				__nodemask_pr_bits(maskp)
static __always_inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
{
	return m ? MAX_NUMNODES : 0;
}
static __always_inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
{
	return m ? m->bits : NULL;
}
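/*
 * Sketch of how nodemask_pr_args() pairs with the '%*pb[l]' printk format
 * (hypothetical caller, shown only for illustration):
 *
 *	pr_info("memory nodes: %*pbl\n",
 *		nodemask_pr_args(&node_states[N_MEMORY]));
 */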
/*
 * The inline keyword gives the compiler room to decide to inline, or
 * not inline a function as it sees best.  However, as these functions
 * are called in both __init and non-__init functions, if they are not
 * inlined we will end up with a section mismatch error (of the type of
 * freeable items not being freed).  So we must use __always_inline here
 * to fix the problem.  If other functions in the future also end up in
 * this situation they will also need to be annotated as __always_inline
 */
#define node_set(node, dst) __node_set((node), &(dst))
static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
{
	set_bit(node, dstp->bits);
}

#define node_clear(node, dst) __node_clear((node), &(dst))
static __always_inline void __node_clear(int node, volatile nodemask_t *dstp)
{
	clear_bit(node, dstp->bits);
}

#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
static __always_inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
	bitmap_fill(dstp->bits, nbits);
}

#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
static __always_inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
	bitmap_zero(dstp->bits, nbits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define node_isset(node, nodemask) test_bit((node), (nodemask).bits)

#define node_test_and_set(node, nodemask) \
			__node_test_and_set((node), &(nodemask))
static __always_inline bool __node_test_and_set(int node, nodemask_t *addr)
{
	return test_and_set_bit(node, addr->bits);
}
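/*
 * Sketch of node_test_and_set() used to claim a node exactly once; the
 * return value is true if the bit was already set (hypothetical 'visited'
 * mask, for illustration only):
 *
 *	static nodemask_t visited = NODE_MASK_NONE;
 *
 *	if (!node_test_and_set(nid, visited))
 *		pr_info("first time on node %d\n", nid);
 */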
#define nodes_and(dst, src1, src2) \
			__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
static __always_inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_or(dst, src1, src2) \
			__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
static __always_inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_xor(dst, src1, src2) \
			__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
static __always_inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_andnot(dst, src1, src2) \
			__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
static __always_inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_complement(dst, src) \
			__nodes_complement(&(dst), &(src), MAX_NUMNODES)
static __always_inline void __nodes_complement(nodemask_t *dstp,
					const nodemask_t *srcp, unsigned int nbits)
{
	bitmap_complement(dstp->bits, srcp->bits, nbits);
}
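/*
 * Sketch of combining masks with the logical helpers: 'wanted' becomes the
 * intersection of two hypothetical masks, 'fallback' the nodes that are
 * allowed but not preferred (illustration only, not kernel code):
 *
 *	nodemask_t wanted, fallback;
 *
 *	nodes_and(wanted, preferred, allowed);
 *	nodes_andnot(fallback, allowed, preferred);
 *	if (nodes_empty(wanted))
 *		wanted = fallback;
 */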
#define nodes_equal(src1, src2) \
			__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
static __always_inline bool __nodes_equal(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define nodes_intersects(src1, src2) \
			__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
static __always_inline bool __nodes_intersects(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define nodes_subset(src1, src2) \
			__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
static __always_inline bool __nodes_subset(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_subset(src1p->bits, src2p->bits, nbits);
}

#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
static __always_inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_empty(srcp->bits, nbits);
}

#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
static __always_inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_full(srcp->bits, nbits);
}

#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_weight(srcp->bits, nbits);
}
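/*
 * Sketch of the query helpers: check that a task's hypothetical
 * 'mems_allowed' mask stays within the online nodes and report how many
 * nodes it spans (illustration only):
 *
 *	if (!nodes_subset(mems_allowed, node_states[N_ONLINE]))
 *		pr_warn("mask contains offline nodes\n");
 *	pr_info("%d node(s) allowed\n", nodes_weight(mems_allowed));
 */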
#define nodes_shift_right(dst, src, n) \
			__nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
static __always_inline void __nodes_shift_right(nodemask_t *dstp,
					const nodemask_t *srcp, int n, int nbits)
{
	bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}

#define nodes_shift_left(dst, src, n) \
			__nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
static __always_inline void __nodes_shift_left(nodemask_t *dstp,
					const nodemask_t *srcp, int n, int nbits)
{
	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}

/* FIXME: better would be to fix all architectures to never return
          > MAX_NUMNODES, then the silly min_ts could be dropped. */

#define first_node(src) __first_node(&(src))
static __always_inline unsigned int __first_node(const nodemask_t *srcp)
{
	return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}

#define next_node(n, src) __next_node((n), &(src))
static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
	return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}

/*
 * Find the next present node in src, starting after node n, wrapping around to
 * the first node in src if needed.  Returns MAX_NUMNODES if src is empty.
 */
#define next_node_in(n, src) __next_node_in((n), &(src))
static __always_inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
{
	unsigned int ret = __next_node(node, srcp);

	if (ret == MAX_NUMNODES)
		ret = __first_node(srcp);
	return ret;
}
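/*
 * Sketch of round-robin node selection with next_node_in(), in the style of
 * interleaving work over a mask (hypothetical 'last_nid' state, illustration
 * only); starting from NUMA_NO_NODE yields the first node in the mask:
 *
 *	static int last_nid = NUMA_NO_NODE;
 *
 *	last_nid = next_node_in(last_nid, node_states[N_MEMORY]);
 *	if (last_nid == MAX_NUMNODES)
 *		last_nid = NUMA_NO_NODE;
 */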
static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
	nodes_clear(*mask);
	node_set(node, *mask);
}

#define nodemask_of_node(node)						\
({									\
	typeof(_unused_nodemask_arg_) m;				\
	if (sizeof(m) == sizeof(unsigned long)) {			\
		m.bits[0] = 1UL << (node);				\
	} else {							\
		init_nodemask_of_node(&m, (node));			\
	}								\
	m;								\
})
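/*
 * Sketch of building a single-node mask for a hypothetical node 'nid'
 * (illustration only); nodemask_of_node() yields the same mask as an
 * expression:
 *
 *	nodemask_t one_node;
 *
 *	init_nodemask_of_node(&one_node, nid);
 *	one_node = nodemask_of_node(nid);
 */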
#define first_unset_node(mask) __first_unset_node(&(mask))
static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
	return min_t(unsigned int, MAX_NUMNODES,
			find_first_zero_bit(maskp->bits, MAX_NUMNODES));
}

#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)

#if MAX_NUMNODES <= BITS_PER_LONG

#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })

#else

#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL,			\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })

#endif

#define NODE_MASK_NONE							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-1] =  0UL			\
} })

#define nodes_addr(src) ((src).bits)

#define nodemask_parse_user(ubuf, ulen, dst) \
		__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
static __always_inline int __nodemask_parse_user(const char __user *buf, int len,
					nodemask_t *dstp, int nbits)
{
	return bitmap_parse_user(buf, len, dstp->bits, nbits);
}

#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
static __always_inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
	return bitmap_parselist(buf, dstp->bits, nbits);
}
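/*
 * Sketch of parsing a node list written as ASCII ranges, e.g. the string
 * "0-3,7" (hypothetical 'buf' and 'mask', illustration only):
 *
 *	nodemask_t mask;
 *	int err = nodelist_parse(buf, mask);
 *
 *	if (err)
 *		return err;
 */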
#define node_remap(oldbit, old, new) \
		__node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
static __always_inline int __node_remap(int oldbit,
		const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
	return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}

#define nodes_remap(dst, src, old, new) \
		__nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
static __always_inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
		const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
	bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}

#define nodes_onto(dst, orig, relmap) \
		__nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
static __always_inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
		const nodemask_t *relmapp, int nbits)
{
	bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
}

#define nodes_fold(dst, orig, sz) \
		__nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
static __always_inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
		int sz, int nbits)
{
	bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}
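/*
 * Sketch of nodes_remap(): a node of 'src' that is the n-th set bit of 'old'
 * maps to the n-th set bit of 'new', so a nodemask can follow a policy
 * rebind from one set of nodes to another (hypothetical masks, illustration
 * only; see bitmap_remap() in lib/bitmap.c for the full semantics):
 *
 *	nodemask_t dst;
 *
 *	nodes_remap(dst, src, old, new);
 */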
#if MAX_NUMNODES > 1
#define for_each_node_mask(node, mask)				    \
	for ((node) = first_node(mask);				    \
	     (node) < MAX_NUMNODES;				    \
	     (node) = next_node((node), (mask)))
#else /* MAX_NUMNODES == 1 */
#define for_each_node_mask(node, mask)                                  \
	for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++)
#endif /* MAX_NUMNODES */

/*
 * Bitmasks that are kept for all the nodes.
 */
enum node_states {
	N_POSSIBLE,		/* The node could become online at some point */
	N_ONLINE,		/* The node is online */
	N_NORMAL_MEMORY,	/* The node has regular memory */
#ifdef CONFIG_HIGHMEM
	N_HIGH_MEMORY,		/* The node has regular or high memory */
#else
	N_HIGH_MEMORY = N_NORMAL_MEMORY,
#endif
	N_MEMORY,		/* The node has memory (regular, high, movable) */
	N_CPU,			/* The node has one or more cpus */
	N_GENERIC_INITIATOR,	/* The node has one or more Generic Initiators */
	NR_NODE_STATES
};
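/*
 * Sketch of querying the per-state masks declared below (hypothetical
 * caller, for illustration only):
 *
 *	int nid;
 *
 *	for_each_node_state(nid, N_MEMORY)
 *		pr_info("node %d has memory\n", nid);
 *	if (node_state(1, N_CPU))
 *		pr_info("node 1 has CPUs\n");
 */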
/*
 * The following particular system nodemasks and operations
 * on them manage all possible and online nodes.
 */

extern nodemask_t node_states[NR_NODE_STATES];

#if MAX_NUMNODES > 1
static __always_inline int node_state(int node, enum node_states state)
{
	return node_isset(node, node_states[state]);
}

static __always_inline void node_set_state(int node, enum node_states state)
{
	__node_set(node, &node_states[state]);
}

static __always_inline void node_clear_state(int node, enum node_states state)
{
	__node_clear(node, &node_states[state]);
}

static __always_inline int num_node_state(enum node_states state)
{
	return nodes_weight(node_states[state]);
}

#define for_each_node_state(__node, __state) \
	for_each_node_mask((__node), node_states[__state])

#define first_online_node	first_node(node_states[N_ONLINE])
#define first_memory_node	first_node(node_states[N_MEMORY])
static __always_inline unsigned int next_online_node(int nid)
{
	return next_node(nid, node_states[N_ONLINE]);
}
static __always_inline unsigned int next_memory_node(int nid)
{
	return next_node(nid, node_states[N_MEMORY]);
}

extern unsigned int nr_node_ids;
extern unsigned int nr_online_nodes;

static __always_inline void node_set_online(int nid)
{
	node_set_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

static __always_inline void node_set_offline(int nid)
{
	node_clear_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

#else

static __always_inline int node_state(int node, enum node_states state)
{
	return node == 0;
}

static __always_inline void node_set_state(int node, enum node_states state)
{
}

static __always_inline void node_clear_state(int node, enum node_states state)
{
}

static __always_inline int num_node_state(enum node_states state)
{
	return 1;
}

#define for_each_node_state(node, __state) \
	for ( (node) = 0; (node) == 0; (node) = 1)

#define first_online_node	0
#define first_memory_node	0
#define next_online_node(nid)	(MAX_NUMNODES)
#define next_memory_node(nid)	(MAX_NUMNODES)
#define nr_node_ids		1U
#define nr_online_nodes		1U

#define node_set_online(node)	   node_set_state((node), N_ONLINE)
#define node_set_offline(node)	   node_clear_state((node), N_ONLINE)

#endif

static __always_inline int node_random(const nodemask_t *maskp)
{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
	int w, bit;

	w = nodes_weight(*maskp);
	switch (w) {
	case 0:
		bit = NUMA_NO_NODE;
		break;
	case 1:
		bit = first_node(*maskp);
		break;
	default:
		bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_u32_below(w));
		break;
	}
	return bit;
#else
	return 0;
#endif
}
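/*
 * Sketch of node_random(): pick a random node from a mask, e.g. to spread
 * hypothetical per-node work across the online nodes (illustration only);
 * on NUMA builds an empty mask yields NUMA_NO_NODE:
 *
 *	int nid = node_random(&node_states[N_ONLINE]);
 */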
#define node_online_map 	node_states[N_ONLINE]
#define node_possible_map 	node_states[N_POSSIBLE]

#define num_online_nodes()	num_node_state(N_ONLINE)
#define num_possible_nodes()	num_node_state(N_POSSIBLE)
#define node_online(node)	node_state((node), N_ONLINE)
#define node_possible(node)	node_state((node), N_POSSIBLE)

#define for_each_node(node)	   for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)

/*
 * For nodemask scratch area.
 * NODEMASK_ALLOC(type, name, gfp_flags) allocates an object with a specified
 * type and name.
 */
#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
#define NODEMASK_ALLOC(type, name, gfp_flags)	\
			type *name = kmalloc(sizeof(*name), gfp_flags)
#define NODEMASK_FREE(m)			kfree(m)
#else
#define NODEMASK_ALLOC(type, name, gfp_flags)	type _##name, *name = &_##name
#define NODEMASK_FREE(m)			do {} while (0)
#endif

/* Example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
	nodemask_t	mask1;
	nodemask_t	mask2;
};

#define NODEMASK_SCRATCH(x)						\
			NODEMASK_ALLOC(struct nodemask_scratch, x,	\
					GFP_KERNEL | __GFP_NORETRY)
#define NODEMASK_SCRATCH_FREE(x)	NODEMASK_FREE(x)
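/*
 * Sketch of NODEMASK_SCRATCH(): the scratch masks live on the stack when
 * nodemask_t is small and are kmalloc'ed when it is large, so the pointer
 * must be checked and freed (hypothetical caller, illustration only):
 *
 *	NODEMASK_SCRATCH(scratch);
 *
 *	if (!scratch)
 *		return -ENOMEM;
 *	nodes_and(scratch->mask1, a, b);
 *	NODEMASK_SCRATCH_FREE(scratch);
 */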
#endif /* __LINUX_NODEMASK_H */