1 /* $NetBSD: bitops.h,v 1.16 2021/12/19 11:03:01 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #ifndef _LINUX_BITOPS_H_
33 #define _LINUX_BITOPS_H_
34
35 #include <sys/cdefs.h>
36 #include <sys/types.h>
37 #include <sys/param.h>
38 #include <sys/atomic.h>
39 #include <sys/bitops.h>
40
41 #include <machine/limits.h>
42
43 #include <lib/libkern/libkern.h>
44
45 #include <asm/barrier.h>
46
47 #include <linux/bits.h>
48
/*
 * __ffs(x)
 *
 *	Linux: zero-based index of the least significant set bit of x.
 *	Undefined for x == 0 (our ffs64 is one-based and returns 0 for
 *	zero input, hence the -1 adjustment and the assertion).
 */
static inline unsigned long
__ffs(unsigned long x)
{
	unsigned long onebased;

	KASSERT(x != 0);
	onebased = ffs64(x);	/* one-based position of lowest set bit */
	return onebased - 1;	/* convert to Linux's zero-based index */
}
60
/*
 * __ffs64(x)
 *
 *	64-bit variant of __ffs: zero-based index of the least
 *	significant set bit of x; undefined for x == 0.
 */
static inline unsigned long
__ffs64(uint64_t x)
{
	unsigned long onebased;

	KASSERT(x != 0);
	onebased = ffs64(x);	/* one-based */
	return onebased - 1;	/* zero-based */
}
68
/*
 * fls(x)
 *
 *	Linux fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32 -- exactly
 *	the semantics of NetBSD's fls32, so just delegate.
 */
static inline int
fls(int x)
{
	return fls32((uint32_t)x);
}
78
/*
 * hweight8(w)
 *
 *	Population count (Hamming weight) of the low 8 bits.
 */
static inline unsigned int
hweight8(uint8_t w)
{
	return popcount(w & 0xffu);
}
84
/*
 * hweight16(n)
 *
 *	Population count of a 16-bit value.
 */
static inline unsigned int
hweight16(uint16_t n)
{
	/* Zero-extends to 32 bits; popcount32 does the rest. */
	return popcount32(n);
}
90
/*
 * hweight32(n)
 *
 *	Population count of a 32-bit value.
 */
static inline unsigned int
hweight32(uint32_t n)
{
	return popcount32(n);
}
96
/*
 * hweight64(n)
 *
 *	Population count of a 64-bit value.
 */
static inline unsigned int
hweight64(uint64_t n)
{
	return popcount64(n);
}
102
/*
 * sign_extend64(x, n)
 *
 *	Treat bit n of x as a sign bit: return x's bits 0..n as a
 *	signed 64-bit value, discarding bits above n.  Requires
 *	n <= 63.  Relies on arithmetic right shift of signed values,
 *	as the Linux original does.
 */
static inline int64_t
sign_extend64(uint64_t x, unsigned n)
{
	const unsigned shift = 63 - n;

	/* Push bit n up to bit 63, then shift back with sign copy. */
	return (int64_t)(x << shift) >> shift;
}
108
/*
 * BITS_TO_LONGS(n)
 *
 *	Number of unsigned longs needed to hold an n-bit bitmap,
 *	rounded up -- Linux's DIV_ROUND_UP(n, BITS_PER_LONG).  The
 *	previous definition returned the rounded-up *bit* count
 *	(roundup2 without the division), over-sizing every bitmap
 *	array by a factor of BITS_PER_LONG.
 */
#define	BITS_TO_LONGS(n)						      \
	(((n) + (sizeof(unsigned long) * CHAR_BIT) - 1) /		      \
	    (sizeof(unsigned long) * CHAR_BIT))

#define	BITS_PER_TYPE(type)	(sizeof(type) * NBBY)
#define	BITS_PER_BYTE		NBBY
#define	BITS_PER_LONG		(__SIZEOF_LONG__ * CHAR_BIT)

/* Single-bit and contiguous-bit-range masks, via NetBSD's __BIT/__BITS. */
#define	BIT(n)			((unsigned long)__BIT(n))
#define	BIT_ULL(n)		((unsigned long long)__BIT(n))
#define	GENMASK(h,l)		((unsigned long)__BITS(h,l))
#define	GENMASK_ULL(h,l)	((unsigned long long)__BITS(h,l))
120
/*
 * test_bit(n, p)
 *
 *	Nonzero iff bit n of the bitmap at p is set.  Plain (non-
 *	atomic) volatile read, as in Linux's generic test_bit.
 */
static inline int
test_bit(unsigned int n, const volatile unsigned long *p)
{
	const unsigned nper = (sizeof(unsigned long) * CHAR_BIT);
	const unsigned long mask = 1UL << (n % nper);

	return ((p[n / nper] & mask) != 0);
}
128
/*
 * __set_bit(n, p)
 *
 *	Set bit n of the bitmap at p, non-atomically.
 */
static inline void
__set_bit(unsigned int n, volatile unsigned long *p)
{
	const unsigned nper = (sizeof(unsigned long) * CHAR_BIT);
	const unsigned long mask = 1UL << (n % nper);

	p[n / nper] |= mask;
}
136
/*
 * __clear_bit(n, p)
 *
 *	Clear bit n of the bitmap at p, non-atomically.
 */
static inline void
__clear_bit(unsigned int n, volatile unsigned long *p)
{
	const unsigned nper = (sizeof(unsigned long) * CHAR_BIT);
	const unsigned long mask = 1UL << (n % nper);

	p[n / nper] &= ~mask;
}
144
/*
 * __change_bit(n, p)
 *
 *	Flip bit n of the bitmap at p, non-atomically.
 */
static inline void
__change_bit(unsigned int n, volatile unsigned long *p)
{
	const unsigned nper = (sizeof(unsigned long) * CHAR_BIT);
	const unsigned long mask = 1UL << (n % nper);

	p[n / nper] ^= mask;
}
152
/*
 * __test_and_set_bit(bit, ptr)
 *
 *	Non-atomically set the given bit and return its previous
 *	value (1 if it was set, 0 if clear).
 */
static inline unsigned long
__test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int nper = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / nper];
	const unsigned long mask = (1UL << (bit % nper));
	const unsigned long old = *p;

	*p = old | mask;

	return ((old & mask) != 0);
}
166
/*
 * __test_and_clear_bit(bit, ptr)
 *
 *	Non-atomically clear the given bit and return its previous
 *	value (1 if it was set, 0 if clear).
 */
static inline unsigned long
__test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int nper = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / nper];
	const unsigned long mask = (1UL << (bit % nper));
	const unsigned long old = *p;

	*p = old & ~mask;

	return ((old & mask) != 0);
}
180
/*
 * __test_and_change_bit(bit, ptr)
 *
 *	Non-atomically flip the given bit and return its previous
 *	value (1 if it was set, 0 if clear).
 */
static inline unsigned long
__test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int nper = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / nper];
	const unsigned long mask = (1UL << (bit % nper));
	const unsigned long old = *p;

	*p = old ^ mask;

	return ((old & mask) != 0);
}
194
/*
 * __find_next_bit(ptr, nbits, startbit, toggle)
 *
 *	Return the index of the first bit at or after startbit in the
 *	nbits-bit bitmap at ptr whose value differs from the
 *	corresponding bit of toggle (toggle = 0 finds set bits,
 *	toggle = ~0UL finds clear bits), or nbits if there is none.
 *	Requires startbit <= nbits.
 *
 *	Fix: the word count formerly started at howmany(nbits, bpl)
 *	even though p starts startbit/bpl words into the bitmap, so
 *	an unsuccessful scan read up to startbit/bpl words past the
 *	end of the array.  (The MIN clamp hid this from the return
 *	value, but not from the out-of-bounds access.)  Subtract the
 *	skipped whole words from n.
 */
static inline unsigned long
__find_next_bit(const unsigned long *ptr, unsigned long nbits,
    unsigned long startbit, unsigned long toggle)
{
	const size_t bpl = (CHAR_BIT * sizeof(*ptr));
	const unsigned long *p = ptr + startbit/bpl;
	size_t n = howmany(nbits, bpl) - startbit/bpl;
	unsigned long result;
	uint64_t word;

	/*
	 * We use ffs64 because NetBSD doesn't have a handy ffsl that
	 * works on unsigned long.  This is a waste on 32-bit systems
	 * but I'd rather not maintain multiple copies of this -- the
	 * first version had enough bugs already.
	 */

	/* Do we need to examine a partial starting word? */
	if (startbit % bpl) {
		/* Toggle the bits and convert to 64 bits for ffs64. */
		word = *p ^ toggle;

		/* Clear the low startbit%bpl bits. */
		word &= (~0UL << (startbit % bpl));

		/* Are any of these bits set now? */
		if (word)
			goto out;

		/* Move past it. */
		p++;
		n--;
	}

	/* Find the first word with any bits set. */
	for (; n --> 0; p++) {
		/* Toggle the bits and convert to 64 bits for ffs64. */
		word = *p ^ toggle;

		/* Are any of these bits set now? */
		if (word)
			goto out;
	}

	/* Nada. */
	return nbits;

out:
	/* Count how many words we've skipped. */
	result = bpl*(p - ptr);

	/* Find the first set bit in this word, zero-based. */
	result += ffs64(word) - 1;

	/* We may have overshot, so clamp down to at most nbits. */
	return MIN(result, nbits);
}
252
/*
 * find_next_bit(ptr, nbits, startbit)
 *
 *	Index of the first set bit at or after startbit, or nbits if
 *	none.
 */
static inline unsigned long
find_next_bit(const unsigned long *ptr, unsigned long nbits,
    unsigned long startbit)
{
	/* Toggle of 0 leaves the bits as-is: search for set bits. */
	return __find_next_bit(ptr, nbits, startbit, 0UL);
}
259
/*
 * find_first_bit(ptr, nbits)
 *
 *	Index of the first set bit in the bitmap, or nbits if none.
 */
static inline unsigned long
find_first_bit(const unsigned long *ptr, unsigned long nbits)
{
	return find_next_bit(ptr, nbits, /*startbit*/0);
}
265
/*
 * find_next_zero_bit(ptr, nbits, startbit)
 *
 *	Index of the first clear bit at or after startbit, or nbits
 *	if none.
 */
static inline unsigned long
find_next_zero_bit(const unsigned long *ptr, unsigned long nbits,
    unsigned long startbit)
{
	/* Toggling every bit turns the clear-bit search into a
	 * set-bit search. */
	return __find_next_bit(ptr, nbits, startbit, ~0UL);
}
272
/*
 * find_first_zero_bit(ptr, nbits)
 *
 *	Index of the first clear bit in the bitmap, or nbits if none.
 */
static inline unsigned long
find_first_zero_bit(const unsigned long *ptr, unsigned long nbits)
{
	return find_next_zero_bit(ptr, nbits, /*startbit*/0);
}
278
/*
 * for_each_set_bit(BIT, PTR, NBITS)
 *
 *	Iterate BIT (an lvalue) over the indices of the set bits in
 *	the NBITS-bit bitmap at PTR, in increasing order.
 */
#define	for_each_set_bit(BIT, PTR, NBITS)				      \
	for ((BIT) = find_first_bit((PTR), (NBITS));			      \
	    (BIT) < (NBITS);						      \
	    (BIT) = find_next_bit((PTR), (NBITS), (BIT) + 1))

/*
 * for_each_clear_bit(BIT, PTR, NBITS)
 *
 *	Iterate BIT (an lvalue) over the indices of the clear bits in
 *	the NBITS-bit bitmap at PTR, in increasing order.
 */
#define	for_each_clear_bit(BIT, PTR, NBITS)				      \
	for ((BIT) = find_first_zero_bit((PTR), (NBITS));		      \
	    (BIT) < (NBITS);						      \
	    (BIT) = find_next_zero_bit((PTR), (NBITS), (BIT) + 1))
288
/*
 * set_bit(bit, ptr)
 *
 *	Atomically set the given bit in the bitmap.  Like Linux's
 *	set_bit, implies no memory barrier.
 */
static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int nper = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / nper];

	/* no memory barrier */
	atomic_or_ulong(p, 1UL << (bit % nper));
}
297
/*
 * clear_bit(bit, ptr)
 *
 *	Atomically clear the given bit in the bitmap.  Like Linux's
 *	clear_bit, implies no memory barrier.
 */
static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int nper = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / nper];

	/* no memory barrier */
	atomic_and_ulong(p, ~(1UL << (bit % nper)));
}
306
/*
 * clear_bit_unlock(bit, ptr)
 *
 *	Atomically clear the given bit with store-release semantics:
 *	all memory operations before this call are ordered before the
 *	clear becomes visible.
 */
static inline void
clear_bit_unlock(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int nper = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / nper];

	/* store-release */
	smp_mb__before_atomic();
	atomic_and_ulong(p, ~(1UL << (bit % nper)));
}
316
/*
 * change_bit(bit, ptr)
 *
 *	Atomically flip the given bit in the bitmap via compare-and-
 *	swap.  Implies no memory barrier.
 */
static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int nper = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / nper];
	const unsigned long mask = (1UL << (bit % nper));

	/* no memory barrier */
	for (;;) {
		const unsigned long old = *p;

		if (atomic_cas_ulong(p, old, old ^ mask) == old)
			break;
	}
}
328
/*
 * test_and_set_bit(bit, ptr)
 *
 *	Atomically set the given bit and return its previous value.
 *	Full memory barriers before and after, as Linux requires of
 *	test_and_set_bit.
 */
static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int nper = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / nper];
	const unsigned long mask = (1UL << (bit % nper));
	unsigned long old;

	smp_mb__before_atomic();
	for (;;) {
		old = *p;
		if (atomic_cas_ulong(p, old, old | mask) == old)
			break;
	}
	smp_mb__after_atomic();

	return ((old & mask) != 0);
}
343
/*
 * test_and_clear_bit(bit, ptr)
 *
 *	Atomically clear the given bit and return its previous value.
 *	Full memory barriers before and after.
 */
static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int nper = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / nper];
	const unsigned long mask = (1UL << (bit % nper));
	unsigned long old;

	smp_mb__before_atomic();
	for (;;) {
		old = *p;
		if (atomic_cas_ulong(p, old, old & ~mask) == old)
			break;
	}
	smp_mb__after_atomic();

	return ((old & mask) != 0);
}
358
/*
 * test_and_change_bit(bit, ptr)
 *
 *	Atomically flip the given bit and return its previous value.
 *	Full memory barriers before and after.
 */
static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int nper = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / nper];
	const unsigned long mask = (1UL << (bit % nper));
	unsigned long old;

	smp_mb__before_atomic();
	for (;;) {
		old = *p;
		if (atomic_cas_ulong(p, old, old ^ mask) == old)
			break;
	}
	smp_mb__after_atomic();

	return ((old & mask) != 0);
}
373
374 #endif /* _LINUX_BITOPS_H_ */
375