1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 #ifndef	_LINUX_BITOPS_H_
32 #define	_LINUX_BITOPS_H_
33 
34 #include <sys/param.h>
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <sys/errno.h>
38 #include <sys/libkern.h>
39 
/* Single-bit masks: BIT() yields an unsigned long, BIT_ULL() a 64-bit value. */
#define	BIT(nr)			(1UL << (nr))
#define	BIT_ULL(nr)		(1ULL << (nr))
/* Width of "unsigned long" in bits: 64 on LP64 platforms, else 32. */
#ifdef __LP64__
#define	BITS_PER_LONG		64
#else
#define	BITS_PER_LONG		32
#endif

/*
 * Word masks for the bitmap search routines below:
 * FIRST_WORD_MASK(start) keeps bit "start" (mod word width) and above;
 * LAST_WORD_MASK(n) keeps the low "n" (mod word width) bits.
 * NOTE(review): LAST_WORD_MASK(0) shifts by the full word width, which is
 * undefined behavior in C -- callers below only use it with n > 0.
 */
#define	BITMAP_FIRST_WORD_MASK(start)	(~0UL << ((start) % BITS_PER_LONG))
#define	BITMAP_LAST_WORD_MASK(n)	(~0UL >> (BITS_PER_LONG - (n)))
/* Number of longs needed to store an n-bit bitmap. */
#define	BITS_TO_LONGS(n)	howmany((n), BITS_PER_LONG)
/* In-word mask for bit "nr", and the index of the word that holds it. */
#define	BIT_MASK(nr)		(1UL << ((nr) & (BITS_PER_LONG - 1)))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
/* Contiguous mask with bits h..l (inclusive) set, e.g. GENMASK(3, 1) == 0xe. */
#define	GENMASK(h, l)		(((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
#define BITS_PER_BYTE           8

/* Population count (number of set bits), mapped to libkern's bitcount*(). */
#define	hweight8(x)	bitcount((uint8_t)(x))
#define	hweight16(x)	bitcount16(x)
#define	hweight32(x)	bitcount32(x)
#define	hweight64(x)	bitcount64(x)
#define	hweight_long(x)	bitcountl(x)
61 
static inline int
__ffs(int mask)
{
	/* 0-based index of the lowest set bit; -1 when mask == 0 (ffs() is 1-based). */
	int idx;

	idx = ffs(mask);
	return (idx - 1);
}
67 
static inline int
__fls(int mask)
{
	/* 0-based index of the highest set bit; -1 when mask == 0 (fls() is 1-based). */
	int idx;

	idx = fls(mask);
	return (idx - 1);
}
73 
static inline int
__ffsl(long mask)
{
	/* 0-based index of the lowest set bit of a long; -1 when mask == 0. */
	int idx;

	idx = ffsl(mask);
	return (idx - 1);
}
79 
static inline int
__flsl(long mask)
{
	/* 0-based index of the highest set bit of a long; -1 when mask == 0. */
	int idx;

	idx = flsl(mask);
	return (idx - 1);
}
85 
static inline int
fls64(uint64_t mask)
{
	/* 1-based index of the highest set bit of a 64-bit value; 0 when mask == 0. */
	int idx;

	idx = flsll(mask);
	return (idx);
}
91 
static inline uint32_t
ror32(uint32_t word, unsigned int shift)
{
	/*
	 * Rotate a 32-bit value right by "shift" bits.
	 *
	 * Bug fix: the previous expression "word << (32 - shift)" shifted by
	 * the full type width when shift == 0 (or any multiple of 32), which
	 * is undefined behavior in C.  Reduce the count modulo 32 and handle
	 * the zero case explicitly so every shift count is well-defined.
	 */
	shift &= 31;
	if (shift == 0)
		return (word);
	return ((word >> shift) | (word << (32 - shift)));
}
97 
/* 0-based index of the first clear bit; yields -1 when mask is all ones. */
#define	ffz(mask)	__ffs(~(mask))
99 
static inline int
get_count_order(unsigned int count)
{
	/*
	 * Order of the smallest power of two >= count, i.e. ceil(log2(count)).
	 * Yields -1 for count == 0 since fls(0) == 0.
	 */
	int order = fls(count) - 1;

	if ((count & (count - 1)) != 0)
		order++;	/* not a power of two: round up */
	return (order);
}
109 
110 static inline unsigned long
111 find_first_bit(const unsigned long *addr, unsigned long size)
112 {
113 	long mask;
114 	int bit;
115 
116 	for (bit = 0; size >= BITS_PER_LONG;
117 	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
118 		if (*addr == 0)
119 			continue;
120 		return (bit + __ffsl(*addr));
121 	}
122 	if (size) {
123 		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
124 		if (mask)
125 			bit += __ffsl(mask);
126 		else
127 			bit += size;
128 	}
129 	return (bit);
130 }
131 
132 static inline unsigned long
133 find_first_zero_bit(const unsigned long *addr, unsigned long size)
134 {
135 	long mask;
136 	int bit;
137 
138 	for (bit = 0; size >= BITS_PER_LONG;
139 	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
140 		if (~(*addr) == 0)
141 			continue;
142 		return (bit + __ffsl(~(*addr)));
143 	}
144 	if (size) {
145 		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
146 		if (mask)
147 			bit += __ffsl(mask);
148 		else
149 			bit += size;
150 	}
151 	return (bit);
152 }
153 
154 static inline unsigned long
155 find_last_bit(const unsigned long *addr, unsigned long size)
156 {
157 	long mask;
158 	int offs;
159 	int bit;
160 	int pos;
161 
162 	pos = size / BITS_PER_LONG;
163 	offs = size % BITS_PER_LONG;
164 	bit = BITS_PER_LONG * pos;
165 	addr += pos;
166 	if (offs) {
167 		mask = (*addr) & BITMAP_LAST_WORD_MASK(offs);
168 		if (mask)
169 			return (bit + __flsl(mask));
170 	}
171 	while (pos--) {
172 		addr--;
173 		bit -= BITS_PER_LONG;
174 		if (*addr)
175 			return (bit + __flsl(*addr));
176 	}
177 	return (size);
178 }
179 
180 static inline unsigned long
181 find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
182 {
183 	long mask;
184 	int offs;
185 	int bit;
186 	int pos;
187 
188 	if (offset >= size)
189 		return (size);
190 	pos = offset / BITS_PER_LONG;
191 	offs = offset % BITS_PER_LONG;
192 	bit = BITS_PER_LONG * pos;
193 	addr += pos;
194 	if (offs) {
195 		mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
196 		if (mask)
197 			return (bit + __ffsl(mask));
198 		if (size - bit <= BITS_PER_LONG)
199 			return (size);
200 		bit += BITS_PER_LONG;
201 		addr++;
202 	}
203 	for (size -= bit; size >= BITS_PER_LONG;
204 	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
205 		if (*addr == 0)
206 			continue;
207 		return (bit + __ffsl(*addr));
208 	}
209 	if (size) {
210 		mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
211 		if (mask)
212 			bit += __ffsl(mask);
213 		else
214 			bit += size;
215 	}
216 	return (bit);
217 }
218 
219 static inline unsigned long
220 find_next_zero_bit(const unsigned long *addr, unsigned long size,
221     unsigned long offset)
222 {
223 	long mask;
224 	int offs;
225 	int bit;
226 	int pos;
227 
228 	if (offset >= size)
229 		return (size);
230 	pos = offset / BITS_PER_LONG;
231 	offs = offset % BITS_PER_LONG;
232 	bit = BITS_PER_LONG * pos;
233 	addr += pos;
234 	if (offs) {
235 		mask = ~(*addr) & ~BITMAP_LAST_WORD_MASK(offs);
236 		if (mask)
237 			return (bit + __ffsl(mask));
238 		if (size - bit <= BITS_PER_LONG)
239 			return (size);
240 		bit += BITS_PER_LONG;
241 		addr++;
242 	}
243 	for (size -= bit; size >= BITS_PER_LONG;
244 	    size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
245 		if (~(*addr) == 0)
246 			continue;
247 		return (bit + __ffsl(~(*addr)));
248 	}
249 	if (size) {
250 		mask = ~(*addr) & BITMAP_LAST_WORD_MASK(size);
251 		if (mask)
252 			bit += __ffsl(mask);
253 		else
254 			bit += size;
255 	}
256 	return (bit);
257 }
258 
/*
 * Set/clear/test bit "i" in the array of longs "a", addressed by bit number.
 * The double-underscore "non-atomic" Linux variants are aliased to the
 * atomic FreeBSD primitives here, which is stronger than Linux requires
 * but still correct.
 */
#define	__set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	set_bit(i, a)							\
    atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	__clear_bit(i, a)						\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

#define	clear_bit(i, a)							\
    atomic_clear_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))

/* Evaluates to 0 or 1; reads the containing word with acquire semantics. */
#define	test_bit(i, a)							\
    !!(atomic_load_acq_long(&((volatile unsigned long *)(a))[BIT_WORD(i)]) &	\
    BIT_MASK(i))
274 
275 static inline int
276 test_and_clear_bit(long bit, volatile unsigned long *var)
277 {
278 	long val;
279 
280 	var += BIT_WORD(bit);
281 	bit %= BITS_PER_LONG;
282 	bit = (1UL << bit);
283 	do {
284 		val = *var;
285 	} while (atomic_cmpset_long(var, val, val & ~bit) == 0);
286 
287 	return !!(val & bit);
288 }
289 
290 static inline int
291 __test_and_clear_bit(long bit, volatile unsigned long *var)
292 {
293 	long val;
294 
295 	var += BIT_WORD(bit);
296 	bit %= BITS_PER_LONG;
297 	bit = (1UL << bit);
298 
299 	val = *var;
300 	*var &= ~bit;
301 
302 	return !!(val & bit);
303 }
304 
305 static inline int
306 test_and_set_bit(long bit, volatile unsigned long *var)
307 {
308 	long val;
309 
310 	var += BIT_WORD(bit);
311 	bit %= BITS_PER_LONG;
312 	bit = (1UL << bit);
313 	do {
314 		val = *var;
315 	} while (atomic_cmpset_long(var, val, val | bit) == 0);
316 
317 	return !!(val & bit);
318 }
319 
320 static inline int
321 __test_and_set_bit(long bit, volatile unsigned long *var)
322 {
323 	long val;
324 
325 	var += BIT_WORD(bit);
326 	bit %= BITS_PER_LONG;
327 	bit = (1UL << bit);
328 
329 	val = *var;
330 	*var |= bit;
331 
332 	return !!(val & bit);
333 }
334 
/* Operations accepted by linux_reg_op() below. */
enum {
        REG_OP_ISFREE,		/* check if the whole region is clear */
        REG_OP_ALLOC,		/* set every bit in the region */
        REG_OP_RELEASE,		/* clear every bit in the region */
};

/*
 * Operate on the (1 << order)-bit region of "bitmap" that starts at bit
 * "pos".  Mirrors Linux's __reg_op().  Returns 1 for REG_OP_ISFREE when
 * every bit in the region is clear; returns 0 otherwise (ALLOC and RELEASE
 * always return 0).
 * NOTE(review): the mask math assumes "pos" is aligned to the region size
 * (offset must be 0 when the region spans multiple longs) -- confirm that
 * callers guarantee this, as Linux's bitmap allocator does.
 */
static inline int
linux_reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
        int nbits_reg;		/* number of bits in the region */
        int index;		/* index of the first long of the region */
        int offset;		/* bit offset of the region within bitmap[index] */
        int nlongs_reg;		/* number of longs spanned by the region */
        int nbitsinlong;	/* number of region bits per spanned long */
        unsigned long mask;	/* bitmask covering one long of the region */
        int i;			/* scans the spanned longs */
        int ret = 0;		/* return value */

        nbits_reg = 1 << order;
        index = pos / BITS_PER_LONG;
        offset = pos - (index * BITS_PER_LONG);
        nlongs_reg = BITS_TO_LONGS(nbits_reg);
        nbitsinlong = min(nbits_reg,  BITS_PER_LONG);

        /*
         * Build the per-long mask in two steps so we never shift by a full
         * word width (1UL << nbitsinlong would be undefined behavior when
         * nbitsinlong == BITS_PER_LONG).
         */
        mask = (1UL << (nbitsinlong - 1));
        mask += mask - 1;
        mask <<= offset;

        switch (reg_op) {
        case REG_OP_ISFREE:
                for (i = 0; i < nlongs_reg; i++) {
                        if (bitmap[index + i] & mask)
                                goto done;	/* a bit is in use: ret stays 0 */
                }
                ret = 1;	/* every bit in the region is clear */
                break;

        case REG_OP_ALLOC:
                for (i = 0; i < nlongs_reg; i++)
                        bitmap[index + i] |= mask;
                break;

        case REG_OP_RELEASE:
                for (i = 0; i < nlongs_reg; i++)
                        bitmap[index + i] &= ~mask;
                break;
        }
done:
        return ret;
}
385 
/*
 * Iterate "bit" over every set bit of the "size"-bit bitmap at "addr",
 * in ascending order.  "bit" must be an integer lvalue; terminates with
 * bit == size.
 */
#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))
390 
391 
/*
 * Sign-extend a 64-bit value from bit "index": bit "index" is treated as
 * the sign bit and replicated through all higher bits.
 */
static inline uint64_t
sign_extend64(uint64_t value, int index)
{
	/* Move bit "index" up to bit 63, then arithmetic-shift it back down. */
	uint8_t distance = 63 - index;
	int64_t widened = (int64_t)(value << distance);

	return ((uint64_t)(widened >> distance));
}
399 
400 #endif	/* _LINUX_BITOPS_H_ */
401