xref: /freebsd/contrib/xz/src/common/tuklib_integer.h (revision 5d3e7166)
1 ///////////////////////////////////////////////////////////////////////////////
2 //
3 /// \file       tuklib_integer.h
4 /// \brief      Various integer and bit operations
5 ///
6 /// This file provides macros or functions to do some basic integer and bit
7 /// operations.
8 ///
9 /// Native endian inline functions (XX = 16, 32, or 64):
10 ///   - Unaligned native endian reads: readXXne(ptr)
11 ///   - Unaligned native endian writes: writeXXne(ptr, num)
12 ///   - Aligned native endian reads: aligned_readXXne(ptr)
13 ///   - Aligned native endian writes: aligned_writeXXne(ptr, num)
14 ///
15 /// Endianness-converting integer operations (these can be macros!)
16 /// (XX = 16, 32, or 64; Y = b or l):
17 ///   - Byte swapping: bswapXX(num)
18 ///   - Byte order conversions to/from native (byteswaps if Y isn't
19 ///     the native endianness): convXXYe(num)
20 ///   - Unaligned reads: readXXYe(ptr)
21 ///   - Unaligned writes: writeXXYe(ptr, num)
22 ///   - Aligned reads: aligned_readXXYe(ptr)
23 ///   - Aligned writes: aligned_writeXXYe(ptr, num)
24 ///
25 /// Since the above can be macros, the arguments should have no side effects
26 /// because they may be evaluated more than once.
27 ///
28 /// Bit scan operations for non-zero 32-bit integers (inline functions):
29 ///   - Bit scan reverse (find highest non-zero bit): bsr32(num)
30 ///   - Count leading zeros: clz32(num)
31 ///   - Count trailing zeros: ctz32(num)
32 ///   - Bit scan forward (simply an alias for ctz32()): bsf32(num)
33 ///
34 /// The above bit scan operations return 0-31. If num is zero,
35 /// the result is undefined.
36 //
37 //  Authors:    Lasse Collin
38 //              Joachim Henke
39 //
40 //  This file has been put into the public domain.
41 //  You can do whatever you want with this file.
42 //
43 ///////////////////////////////////////////////////////////////////////////////
44 
45 #ifndef TUKLIB_INTEGER_H
46 #define TUKLIB_INTEGER_H
47 
48 #include "tuklib_common.h"
49 #include <string.h>
50 
51 // Newer Intel C compilers require immintrin.h for _bit_scan_reverse()
52 // and such functions.
53 #if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1500)
54 #	include <immintrin.h>
55 #endif
56 
57 
58 ///////////////////
59 // Byte swapping //
60 ///////////////////
61 
// Pick byte swapping macros from compiler builtins or OS headers when
// available; generic fallbacks below cover everything else.
62 #if defined(HAVE___BUILTIN_BSWAPXX)
63 	// GCC >= 4.8 and Clang
64 #	define bswap16(n) __builtin_bswap16(n)
65 #	define bswap32(n) __builtin_bswap32(n)
66 #	define bswap64(n) __builtin_bswap64(n)
67 
68 #elif defined(HAVE_BYTESWAP_H)
69 	// glibc, uClibc, dietlibc
70 #	include <byteswap.h>
71 #	ifdef HAVE_BSWAP_16
72 #		define bswap16(num) bswap_16(num)
73 #	endif
74 #	ifdef HAVE_BSWAP_32
75 #		define bswap32(num) bswap_32(num)
76 #	endif
77 #	ifdef HAVE_BSWAP_64
78 #		define bswap64(num) bswap_64(num)
79 #	endif
80 
81 #elif defined(HAVE_SYS_ENDIAN_H)
82 	// *BSDs and Darwin
83 #	include <sys/endian.h>
84 
85 #elif defined(HAVE_SYS_BYTEORDER_H)
86 	// Solaris
87 #	include <sys/byteorder.h>
88 #	ifdef BSWAP_16
89 #		define bswap16(num) BSWAP_16(num)
90 #	endif
91 #	ifdef BSWAP_32
92 #		define bswap32(num) BSWAP_32(num)
93 #	endif
94 #	ifdef BSWAP_64
95 #		define bswap64(num) BSWAP_64(num)
96 #	endif
97 #	ifdef BE_16
98 #		define conv16be(num) BE_16(num)
99 #	endif
100 #	ifdef BE_32
101 #		define conv32be(num) BE_32(num)
102 #	endif
103 #	ifdef BE_64
104 #		define conv64be(num) BE_64(num)
105 #	endif
106 #	ifdef LE_16
107 #		define conv16le(num) LE_16(num)
108 #	endif
109 #	ifdef LE_32
110 #		define conv32le(num) LE_32(num)
111 #	endif
112 #	ifdef LE_64
113 #		define conv64le(num) LE_64(num)
114 #	endif
115 #endif
116 
// Generic fallbacks for any bswapXX that wasn't defined above.
// NOTE: These evaluate the argument more than once; avoid side effects.
117 #ifndef bswap16
118 #	define bswap16(n) (uint16_t)( \
119 		  (((n) & 0x00FFU) << 8) \
120 		| (((n) & 0xFF00U) >> 8) \
121 	)
122 #endif
123 
124 #ifndef bswap32
125 #	define bswap32(n) (uint32_t)( \
126 		  (((n) & UINT32_C(0x000000FF)) << 24) \
127 		| (((n) & UINT32_C(0x0000FF00)) << 8) \
128 		| (((n) & UINT32_C(0x00FF0000)) >> 8) \
129 		| (((n) & UINT32_C(0xFF000000)) >> 24) \
130 	)
131 #endif
132 
133 #ifndef bswap64
134 #	define bswap64(n) (uint64_t)( \
135 		  (((n) & UINT64_C(0x00000000000000FF)) << 56) \
136 		| (((n) & UINT64_C(0x000000000000FF00)) << 40) \
137 		| (((n) & UINT64_C(0x0000000000FF0000)) << 24) \
138 		| (((n) & UINT64_C(0x00000000FF000000)) << 8) \
139 		| (((n) & UINT64_C(0x000000FF00000000)) >> 8) \
140 		| (((n) & UINT64_C(0x0000FF0000000000)) >> 24) \
141 		| (((n) & UINT64_C(0x00FF000000000000)) >> 40) \
142 		| (((n) & UINT64_C(0xFF00000000000000)) >> 56) \
143 	)
144 #endif
145 
146 // Define conversion macros using the basic byte swapping macros.
// convXXYe(num) is a no-op cast when Y matches the native byte order and
// a byteswap otherwise; in the latter case the argument may be evaluated
// more than once.
147 #ifdef WORDS_BIGENDIAN
148 #	ifndef conv16be
149 #		define conv16be(num) ((uint16_t)(num))
150 #	endif
151 #	ifndef conv32be
152 #		define conv32be(num) ((uint32_t)(num))
153 #	endif
154 #	ifndef conv64be
155 #		define conv64be(num) ((uint64_t)(num))
156 #	endif
157 #	ifndef conv16le
158 #		define conv16le(num) bswap16(num)
159 #	endif
160 #	ifndef conv32le
161 #		define conv32le(num) bswap32(num)
162 #	endif
163 #	ifndef conv64le
164 #		define conv64le(num) bswap64(num)
165 #	endif
166 #else
167 #	ifndef conv16be
168 #		define conv16be(num) bswap16(num)
169 #	endif
170 #	ifndef conv32be
171 #		define conv32be(num) bswap32(num)
172 #	endif
173 #	ifndef conv64be
174 #		define conv64be(num) bswap64(num)
175 #	endif
176 #	ifndef conv16le
177 #		define conv16le(num) ((uint16_t)(num))
178 #	endif
179 #	ifndef conv32le
180 #		define conv32le(num) ((uint32_t)(num))
181 #	endif
182 #	ifndef conv64le
183 #		define conv64le(num) ((uint64_t)(num))
184 #	endif
185 #endif
186 
187 
188 ////////////////////////////////
189 // Unaligned reads and writes //
190 ////////////////////////////////
191 
192 // The traditional way of casting e.g. *(const uint16_t *)uint8_pointer
193 // is bad even if the uint8_pointer is properly aligned because this kind
194 // of casts break strict aliasing rules and result in undefined behavior.
195 // With unaligned pointers it's even worse: compilers may emit vector
196 // instructions that require aligned pointers even if non-vector
197 // instructions work with unaligned pointers.
198 //
199 // Using memcpy() is the standard compliant way to do unaligned access.
200 // Many modern compilers inline it so there is no function call overhead.
201 // For those compilers that don't handle the memcpy() method well, the
202 // old casting method (that violates strict aliasing) can be requested at
203 // build time. A third method, casting to a packed struct, would also be
204 // an option but isn't provided to keep things simpler (it's already a mess).
205 // Hopefully this is flexible enough in practice.
206 
// Read a 16-bit unsigned integer in native byte order from a buffer
// that may be unaligned.
static inline uint16_t
read16ne(const uint8_t *buf)
{
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING)
	// Type-punning path, explicitly requested at build time.
	return *(const uint16_t *)buf;
#else
	// Standard-compliant path; modern compilers inline the memcpy().
	uint16_t v;
	memcpy(&v, buf, sizeof(v));
	return v;
#endif
}
219 
220 
// Read a 32-bit unsigned integer in native byte order from a buffer
// that may be unaligned.
static inline uint32_t
read32ne(const uint8_t *buf)
{
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING)
	// Type-punning path, explicitly requested at build time.
	return *(const uint32_t *)buf;
#else
	// Standard-compliant path; modern compilers inline the memcpy().
	uint32_t v;
	memcpy(&v, buf, sizeof(v));
	return v;
#endif
}
233 
234 
// Read a 64-bit unsigned integer in native byte order from a buffer
// that may be unaligned.
static inline uint64_t
read64ne(const uint8_t *buf)
{
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING)
	// Type-punning path, explicitly requested at build time.
	return *(const uint64_t *)buf;
#else
	// Standard-compliant path; modern compilers inline the memcpy().
	uint64_t v;
	memcpy(&v, buf, sizeof(v));
	return v;
#endif
}
247 
248 
// Store a 16-bit unsigned integer in native byte order into a buffer
// that may be unaligned.
static inline void
write16ne(uint8_t *buf, uint16_t num)
{
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING)
	// Type-punning path, explicitly requested at build time.
	*(uint16_t *)buf = num;
#else
	// Standard-compliant path; modern compilers inline the memcpy().
	memcpy(buf, &num, sizeof(num));
#endif
}
260 
261 
// Store a 32-bit unsigned integer in native byte order into a buffer
// that may be unaligned.
static inline void
write32ne(uint8_t *buf, uint32_t num)
{
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING)
	// Type-punning path, explicitly requested at build time.
	*(uint32_t *)buf = num;
#else
	// Standard-compliant path; modern compilers inline the memcpy().
	memcpy(buf, &num, sizeof(num));
#endif
}
273 
274 
// Store a 64-bit unsigned integer in native byte order into a buffer
// that may be unaligned.
static inline void
write64ne(uint8_t *buf, uint64_t num)
{
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING)
	// Type-punning path, explicitly requested at build time.
	*(uint64_t *)buf = num;
#else
	// Standard-compliant path; modern compilers inline the memcpy().
	memcpy(buf, &num, sizeof(num));
#endif
}
286 
287 
// Read an unaligned 16-bit big endian integer.
static inline uint16_t
read16be(const uint8_t *buf)
{
#if defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
	// A native read plus an optional byteswap is cheap here.
	return conv16be(read16ne(buf));
#else
	// Byte-by-byte avoids unaligned access on strict-align targets.
	return (uint16_t)(((uint16_t)buf[0] << 8) | buf[1]);
#endif
}
299 
300 
// Read an unaligned 16-bit little endian integer.
static inline uint16_t
read16le(const uint8_t *buf)
{
#if !defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
	// A native read plus an optional byteswap is cheap here.
	return conv16le(read16ne(buf));
#else
	// Byte-by-byte avoids unaligned access on strict-align targets.
	return (uint16_t)(buf[0] | ((uint16_t)buf[1] << 8));
#endif
}
312 
313 
// Read an unaligned 32-bit big endian integer.
static inline uint32_t
read32be(const uint8_t *buf)
{
#if defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
	// A native read plus an optional byteswap is cheap here.
	return conv32be(read32ne(buf));
#else
	// Byte-by-byte avoids unaligned access on strict-align targets.
	return ((uint32_t)buf[0] << 24)
			| ((uint32_t)buf[1] << 16)
			| ((uint32_t)buf[2] << 8)
			| (uint32_t)buf[3];
#endif
}
328 
329 
// Read an unaligned 32-bit little endian integer.
static inline uint32_t
read32le(const uint8_t *buf)
{
#if !defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
	// A native read plus an optional byteswap is cheap here.
	return conv32le(read32ne(buf));
#else
	// Byte-by-byte avoids unaligned access on strict-align targets.
	return (uint32_t)buf[0]
			| ((uint32_t)buf[1] << 8)
			| ((uint32_t)buf[2] << 16)
			| ((uint32_t)buf[3] << 24);
#endif
}
344 
345 
// Read an unaligned 64-bit big endian integer.
static inline uint64_t
read64be(const uint8_t *buf)
{
#if defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
	// A native read plus an optional byteswap is cheap here.
	return conv64be(read64ne(buf));
#else
	// Byte-by-byte avoids unaligned access on strict-align targets.
	return ((uint64_t)buf[0] << 56)
			| ((uint64_t)buf[1] << 48)
			| ((uint64_t)buf[2] << 40)
			| ((uint64_t)buf[3] << 32)
			| ((uint64_t)buf[4] << 24)
			| ((uint64_t)buf[5] << 16)
			| ((uint64_t)buf[6] << 8)
			| (uint64_t)buf[7];
#endif
}
364 
365 
// Read an unaligned 64-bit little endian integer.
static inline uint64_t
read64le(const uint8_t *buf)
{
#if !defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
	// A native read plus an optional byteswap is cheap here.
	return conv64le(read64ne(buf));
#else
	// Byte-by-byte avoids unaligned access on strict-align targets.
	return (uint64_t)buf[0]
			| ((uint64_t)buf[1] << 8)
			| ((uint64_t)buf[2] << 16)
			| ((uint64_t)buf[3] << 24)
			| ((uint64_t)buf[4] << 32)
			| ((uint64_t)buf[5] << 40)
			| ((uint64_t)buf[6] << 48)
			| ((uint64_t)buf[7] << 56);
#endif
}
384 
385 
386 // NOTE: Possible byte swapping must be done in a macro to allow the compiler
387 // to optimize byte swapping of constants when using glibc's or *BSD's
388 // byte swapping macros. The actual write is done in an inline function
389 // to make type checking of the buf pointer possible.
// These fast-path macros are defined only when a native write (plus an
// optional byteswap) is safe; otherwise the byte-by-byte inline
// functions below are used instead.
390 #if defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
391 #	define write16be(buf, num) write16ne(buf, conv16be(num))
392 #	define write32be(buf, num) write32ne(buf, conv32be(num))
393 #	define write64be(buf, num) write64ne(buf, conv64be(num))
394 #endif
395 
396 #if !defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
397 #	define write16le(buf, num) write16ne(buf, conv16le(num))
398 #	define write32le(buf, num) write32ne(buf, conv32le(num))
399 #	define write64le(buf, num) write64ne(buf, conv64le(num))
400 #endif
401 
402 
#ifndef write16be
// Fallback: store a 16-bit integer in big endian order one byte at a
// time; safe for unaligned buffers on any architecture.
static inline void
write16be(uint8_t *buf, uint16_t num)
{
	for (unsigned int i = 0; i < 2; ++i)
		buf[i] = (uint8_t)(num >> (8 * (1 - i)));
}
#endif
412 
413 
#ifndef write16le
// Fallback: store a 16-bit integer in little endian order one byte at a
// time; safe for unaligned buffers on any architecture.
static inline void
write16le(uint8_t *buf, uint16_t num)
{
	for (unsigned int i = 0; i < 2; ++i)
		buf[i] = (uint8_t)(num >> (8 * i));
}
#endif
423 
424 
#ifndef write32be
// Fallback: store a 32-bit integer in big endian order one byte at a
// time; safe for unaligned buffers on any architecture.
static inline void
write32be(uint8_t *buf, uint32_t num)
{
	for (unsigned int i = 0; i < 4; ++i)
		buf[i] = (uint8_t)(num >> (8 * (3 - i)));
}
#endif
436 
437 
#ifndef write32le
// Fallback: store a 32-bit integer in little endian order one byte at a
// time; safe for unaligned buffers on any architecture.
static inline void
write32le(uint8_t *buf, uint32_t num)
{
	for (unsigned int i = 0; i < 4; ++i)
		buf[i] = (uint8_t)(num >> (8 * i));
}
#endif
449 
450 
451 //////////////////////////////
452 // Aligned reads and writes //
453 //////////////////////////////
454 
455 // Separate functions for aligned reads and writes are provided since on
456 // strict-align archs aligned access is much faster than unaligned access.
457 //
458 // Just like in the unaligned case, memcpy() is needed to avoid
459 // strict aliasing violations. However, on archs that don't support
460 // unaligned access the compiler cannot know that the pointers given
461 // to memcpy() are aligned which results in slow code. As of C11 there is
462 // no standard way to tell the compiler that we know that the address is
463 // aligned but some compilers have language extensions to do that. With
464 // such language extensions the memcpy() method gives excellent results.
465 //
466 // What to do on a strict-align system when no known language extensions
467 // are available? Falling back to byte-by-byte access would be safe but ruin
468 // optimizations that have been made specifically with aligned access in mind.
469 // As a compromise, aligned reads will fall back to non-compliant type punning
470 // but aligned writes will be byte-by-byte, that is, fast reads are preferred
471 // over fast writes. This obviously isn't great but hopefully it's a working
472 // compromise for now.
473 //
474 // __builtin_assume_aligned is supported by GCC >= 4.7 and clang >= 3.6.
// memcpy() wrapper that, where possible, promises the compiler that src
// is suitably aligned so the copy can become a single aligned load/store.
475 #ifdef HAVE___BUILTIN_ASSUME_ALIGNED
476 #	define tuklib_memcpy_aligned(dest, src, size) \
477 		memcpy(dest, __builtin_assume_aligned(src, size), size)
478 #else
479 #	define tuklib_memcpy_aligned(dest, src, size) \
480 		memcpy(dest, src, size)
// Without the builtin and without fast unaligned access, aligned reads
// fall back to (non-compliant) type punning; see the comment above.
481 #	ifndef TUKLIB_FAST_UNALIGNED_ACCESS
482 #		define TUKLIB_USE_UNSAFE_ALIGNED_READS 1
483 #	endif
484 #endif
485 
486 
// Read a 16-bit unsigned integer in native byte order from an aligned
// buffer.
static inline uint16_t
aligned_read16ne(const uint8_t *buf)
{
#if defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING) \
		|| defined(TUKLIB_USE_UNSAFE_ALIGNED_READS)
	// Non-compliant type punning; see the comments above.
	return *(const uint16_t *)buf;
#else
	uint16_t v;
	tuklib_memcpy_aligned(&v, buf, sizeof(v));
	return v;
#endif
}
499 
500 
// Read a 32-bit unsigned integer in native byte order from an aligned
// buffer.
static inline uint32_t
aligned_read32ne(const uint8_t *buf)
{
#if defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING) \
		|| defined(TUKLIB_USE_UNSAFE_ALIGNED_READS)
	// Non-compliant type punning; see the comments above.
	return *(const uint32_t *)buf;
#else
	uint32_t v;
	tuklib_memcpy_aligned(&v, buf, sizeof(v));
	return v;
#endif
}
513 
514 
// Read a 64-bit unsigned integer in native byte order from an aligned
// buffer.
static inline uint64_t
aligned_read64ne(const uint8_t *buf)
{
#if defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING) \
		|| defined(TUKLIB_USE_UNSAFE_ALIGNED_READS)
	// Non-compliant type punning; see the comments above.
	return *(const uint64_t *)buf;
#else
	uint64_t v;
	tuklib_memcpy_aligned(&v, buf, sizeof(v));
	return v;
#endif
}
527 
528 
// Store a 16-bit unsigned integer in native byte order into an aligned
// buffer.
static inline void
aligned_write16ne(uint8_t *buf, uint16_t num)
{
#ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
	// Non-compliant type punning; see the comments above.
	*(uint16_t *)buf = num;
#else
	tuklib_memcpy_aligned(buf, &num, sizeof(num));
#endif
}
539 
540 
// Store a 32-bit unsigned integer in native byte order into an aligned
// buffer.
static inline void
aligned_write32ne(uint8_t *buf, uint32_t num)
{
#ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
	// Non-compliant type punning; see the comments above.
	*(uint32_t *)buf = num;
#else
	tuklib_memcpy_aligned(buf, &num, sizeof(num));
#endif
}
551 
552 
// Store a 64-bit unsigned integer in native byte order into an aligned
// buffer.
static inline void
aligned_write64ne(uint8_t *buf, uint64_t num)
{
#ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
	// Non-compliant type punning; see the comments above.
	*(uint64_t *)buf = num;
#else
	tuklib_memcpy_aligned(buf, &num, sizeof(num));
#endif
}
563 
564 
// Read a 16-bit big endian integer from an aligned buffer.
static inline uint16_t
aligned_read16be(const uint8_t *buf)
{
	return conv16be(aligned_read16ne(buf));
}
571 
572 
// Read a 16-bit little endian integer from an aligned buffer.
static inline uint16_t
aligned_read16le(const uint8_t *buf)
{
	return conv16le(aligned_read16ne(buf));
}
579 
580 
// Read a 32-bit big endian integer from an aligned buffer.
static inline uint32_t
aligned_read32be(const uint8_t *buf)
{
	return conv32be(aligned_read32ne(buf));
}
587 
588 
// Read a 32-bit little endian integer from an aligned buffer.
static inline uint32_t
aligned_read32le(const uint8_t *buf)
{
	return conv32le(aligned_read32ne(buf));
}
595 
596 
// Read a 64-bit big endian integer from an aligned buffer.
static inline uint64_t
aligned_read64be(const uint8_t *buf)
{
	return conv64be(aligned_read64ne(buf));
}
603 
604 
// Read a 64-bit little endian integer from an aligned buffer.
static inline uint64_t
aligned_read64le(const uint8_t *buf)
{
	return conv64le(aligned_read64ne(buf));
}
611 
612 
613 // These need to be macros like in the unaligned case.
// (Macros let the compiler fold the byteswap of constant arguments;
// the argument may therefore be evaluated more than once.)
614 #define aligned_write16be(buf, num) aligned_write16ne((buf), conv16be(num))
615 #define aligned_write16le(buf, num) aligned_write16ne((buf), conv16le(num))
616 #define aligned_write32be(buf, num) aligned_write32ne((buf), conv32be(num))
617 #define aligned_write32le(buf, num) aligned_write32ne((buf), conv32le(num))
618 #define aligned_write64be(buf, num) aligned_write64ne((buf), conv64be(num))
619 #define aligned_write64le(buf, num) aligned_write64ne((buf), conv64le(num))
620 
621 
622 ////////////////////
623 // Bit operations //
624 ////////////////////
625 
626 static inline uint32_t
627 bsr32(uint32_t n)
628 {
629 	// Check for ICC first, since it tends to define __GNUC__ too.
630 #if defined(__INTEL_COMPILER)
631 	return _bit_scan_reverse(n);
632 
633 #elif TUKLIB_GNUC_REQ(3, 4) && UINT_MAX == UINT32_MAX
634 	// GCC >= 3.4 has __builtin_clz(), which gives good results on
635 	// multiple architectures. On x86, __builtin_clz() ^ 31U becomes
636 	// either plain BSR (so the XOR gets optimized away) or LZCNT and
637 	// XOR (if -march indicates that SSE4a instructions are supported).
638 	return (uint32_t)__builtin_clz(n) ^ 31U;
639 
640 #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
641 	uint32_t i;
642 	__asm__("bsrl %1, %0" : "=r" (i) : "rm" (n));
643 	return i;
644 
645 #elif defined(_MSC_VER)
646 	unsigned long i;
647 	_BitScanReverse(&i, n);
648 	return i;
649 
650 #else
651 	uint32_t i = 31;
652 
653 	if ((n & 0xFFFF0000) == 0) {
654 		n <<= 16;
655 		i = 15;
656 	}
657 
658 	if ((n & 0xFF000000) == 0) {
659 		n <<= 8;
660 		i -= 8;
661 	}
662 
663 	if ((n & 0xF0000000) == 0) {
664 		n <<= 4;
665 		i -= 4;
666 	}
667 
668 	if ((n & 0xC0000000) == 0) {
669 		n <<= 2;
670 		i -= 2;
671 	}
672 
673 	if ((n & 0x80000000) == 0)
674 		--i;
675 
676 	return i;
677 #endif
678 }
679 
680 
681 static inline uint32_t
682 clz32(uint32_t n)
683 {
684 #if defined(__INTEL_COMPILER)
685 	return _bit_scan_reverse(n) ^ 31U;
686 
687 #elif TUKLIB_GNUC_REQ(3, 4) && UINT_MAX == UINT32_MAX
688 	return (uint32_t)__builtin_clz(n);
689 
690 #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
691 	uint32_t i;
692 	__asm__("bsrl %1, %0\n\t"
693 		"xorl $31, %0"
694 		: "=r" (i) : "rm" (n));
695 	return i;
696 
697 #elif defined(_MSC_VER)
698 	unsigned long i;
699 	_BitScanReverse(&i, n);
700 	return i ^ 31U;
701 
702 #else
703 	uint32_t i = 0;
704 
705 	if ((n & 0xFFFF0000) == 0) {
706 		n <<= 16;
707 		i = 16;
708 	}
709 
710 	if ((n & 0xFF000000) == 0) {
711 		n <<= 8;
712 		i += 8;
713 	}
714 
715 	if ((n & 0xF0000000) == 0) {
716 		n <<= 4;
717 		i += 4;
718 	}
719 
720 	if ((n & 0xC0000000) == 0) {
721 		n <<= 2;
722 		i += 2;
723 	}
724 
725 	if ((n & 0x80000000) == 0)
726 		++i;
727 
728 	return i;
729 #endif
730 }
731 
732 
733 static inline uint32_t
734 ctz32(uint32_t n)
735 {
736 #if defined(__INTEL_COMPILER)
737 	return _bit_scan_forward(n);
738 
739 #elif TUKLIB_GNUC_REQ(3, 4) && UINT_MAX >= UINT32_MAX
740 	return (uint32_t)__builtin_ctz(n);
741 
742 #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
743 	uint32_t i;
744 	__asm__("bsfl %1, %0" : "=r" (i) : "rm" (n));
745 	return i;
746 
747 #elif defined(_MSC_VER)
748 	unsigned long i;
749 	_BitScanForward(&i, n);
750 	return i;
751 
752 #else
753 	uint32_t i = 0;
754 
755 	if ((n & 0x0000FFFF) == 0) {
756 		n >>= 16;
757 		i = 16;
758 	}
759 
760 	if ((n & 0x000000FF) == 0) {
761 		n >>= 8;
762 		i += 8;
763 	}
764 
765 	if ((n & 0x0000000F) == 0) {
766 		n >>= 4;
767 		i += 4;
768 	}
769 
770 	if ((n & 0x00000003) == 0) {
771 		n >>= 2;
772 		i += 2;
773 	}
774 
775 	if ((n & 0x00000001) == 0)
776 		++i;
777 
778 	return i;
779 #endif
780 }
781 
782 #define bsf32 ctz32
783 
784 #endif
785