#if !defined(phmap_bits_h_guard_)
#define phmap_bits_h_guard_

// ---------------------------------------------------------------------------
// Copyright (c) 2019, Gregory Popovitch - greg7mdp@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Includes work from abseil-cpp (https://github.com/abseil/abseil-cpp)
// with modifications.
//
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ---------------------------------------------------------------------------

// The following guarantees declaration of the byte swap functions
#ifdef _MSC_VER
    #include <stdlib.h>  // NOLINT(build/include)
#elif defined(__APPLE__)
    // Mac OS X / Darwin features
    #include <libkern/OSByteOrder.h>
#elif defined(__FreeBSD__)
    #include <sys/endian.h>
#elif defined(__GLIBC__)
    #include <byteswap.h>  // IWYU pragma: export
#endif

#include <string.h>
#include <cstdint>
#include "phmap_config.h"

#ifdef _MSC_VER
    #pragma warning(push)
    #pragma warning(disable : 4514) // unreferenced inline function has been removed
#endif

// -----------------------------------------------------------------------------
// unaligned APIs
// -----------------------------------------------------------------------------
// Portable handling of unaligned loads, stores, and copies.
// On some platforms, like ARM, the copy functions can be more efficient
// than a load and a store.
// -----------------------------------------------------------------------------

#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) ||\
    defined(MEMORY_SANITIZER)
#include <stdint.h>

extern "C" {
    uint16_t __sanitizer_unaligned_load16(const void *p);
    uint32_t __sanitizer_unaligned_load32(const void *p);
    uint64_t __sanitizer_unaligned_load64(const void *p);
    void __sanitizer_unaligned_store16(void *p, uint16_t v);
    void __sanitizer_unaligned_store32(void *p, uint32_t v);
    void __sanitizer_unaligned_store64(void *p, uint64_t v);
}  // extern "C"

namespace phmap {
namespace bits {

inline uint16_t UnalignedLoad16(const void *p) {
  return __sanitizer_unaligned_load16(p);
}

inline uint32_t UnalignedLoad32(const void *p) {
  return __sanitizer_unaligned_load32(p);
}

inline uint64_t UnalignedLoad64(const void *p) {
  return __sanitizer_unaligned_load64(p);
}

inline void UnalignedStore16(void *p, uint16_t v) {
  __sanitizer_unaligned_store16(p, v);
}

inline void UnalignedStore32(void *p, uint32_t v) {
  __sanitizer_unaligned_store32(p, v);
}

inline void UnalignedStore64(void *p, uint64_t v) {
  __sanitizer_unaligned_store64(p, v);
}

}  // namespace bits
}  // namespace phmap

#define PHMAP_INTERNAL_UNALIGNED_LOAD16(_p) (phmap::bits::UnalignedLoad16(_p))
#define PHMAP_INTERNAL_UNALIGNED_LOAD32(_p) (phmap::bits::UnalignedLoad32(_p))
#define PHMAP_INTERNAL_UNALIGNED_LOAD64(_p) (phmap::bits::UnalignedLoad64(_p))

#define PHMAP_INTERNAL_UNALIGNED_STORE16(_p, _val) (phmap::bits::UnalignedStore16(_p, _val))
#define PHMAP_INTERNAL_UNALIGNED_STORE32(_p, _val) (phmap::bits::UnalignedStore32(_p, _val))
#define PHMAP_INTERNAL_UNALIGNED_STORE64(_p, _val) (phmap::bits::UnalignedStore64(_p, _val))

#else

namespace phmap {
namespace bits {

inline uint16_t UnalignedLoad16(const void *p) {
  uint16_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint32_t UnalignedLoad32(const void *p) {
  uint32_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint64_t UnalignedLoad64(const void *p) {
  uint64_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); }

inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }

inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }

}  // namespace bits
}  // namespace phmap

#define PHMAP_INTERNAL_UNALIGNED_LOAD16(_p) (phmap::bits::UnalignedLoad16(_p))
#define PHMAP_INTERNAL_UNALIGNED_LOAD32(_p) (phmap::bits::UnalignedLoad32(_p))
#define PHMAP_INTERNAL_UNALIGNED_LOAD64(_p) (phmap::bits::UnalignedLoad64(_p))

#define PHMAP_INTERNAL_UNALIGNED_STORE16(_p, _val) (phmap::bits::UnalignedStore16(_p, _val))
#define PHMAP_INTERNAL_UNALIGNED_STORE32(_p, _val) (phmap::bits::UnalignedStore32(_p, _val))
#define PHMAP_INTERNAL_UNALIGNED_STORE64(_p, _val) (phmap::bits::UnalignedStore64(_p, _val))

#endif
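
// Illustrative usage (a sketch, not part of the library's API surface):
// reading and writing a wire format at arbitrary offsets in a byte buffer.
// Casting `buf + 3` to `uint32_t*` and dereferencing would be undefined
// behavior on alignment-strict targets; the memcpy-based helpers above are
// the portable way, and compilers lower them to plain loads/stores where legal.
//
//     unsigned char buf[16] = {0};
//     PHMAP_INTERNAL_UNALIGNED_STORE32(buf + 3, uint32_t{0xDEADBEEF});  // any offset is fine
//     uint32_t field = PHMAP_INTERNAL_UNALIGNED_LOAD32(buf + 3);        // field == 0xDEADBEEF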

// -----------------------------------------------------------------------------
// File: optimization.h
// -----------------------------------------------------------------------------

#if defined(__pnacl__)
    #define PHMAP_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
#elif defined(__clang__)
    // Clang will not tail call given inline volatile assembly.
    #define PHMAP_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
#elif defined(__GNUC__)
    // GCC will not tail call given inline volatile assembly.
    #define PHMAP_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
#elif defined(_MSC_VER)
    #include <intrin.h>
    // The __nop() intrinsic blocks the optimization.
    #define PHMAP_BLOCK_TAIL_CALL_OPTIMIZATION() __nop()
#else
    #define PHMAP_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
#endif
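
// Illustrative usage (a hedged sketch; CountDown is hypothetical, not from
// this library): keeping a recursive call out of tail position so each frame
// stays on the stack, e.g. to preserve a meaningful stack trace.
//
//     int CountDown(int n) {
//         if (n == 0) return 0;
//         int r = CountDown(n - 1);
//         PHMAP_BLOCK_TAIL_CALL_OPTIMIZATION();  // compiler must keep this frame
//         return r;
//     }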

#if defined(__GNUC__)
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wpedantic"
#endif

#ifdef PHMAP_HAVE_INTRINSIC_INT128
    __extension__ typedef unsigned __int128 phmap_uint128;
    inline uint64_t umul128(uint64_t a, uint64_t b, uint64_t* high)
    {
        auto result = static_cast<phmap_uint128>(a) * static_cast<phmap_uint128>(b);
        *high = static_cast<uint64_t>(result >> 64);
        return static_cast<uint64_t>(result);
    }
    #define PHMAP_HAS_UMUL128 1
#elif (defined(_MSC_VER))
    #if defined(_M_X64)
        #pragma intrinsic(_umul128)
        inline uint64_t umul128(uint64_t a, uint64_t b, uint64_t* high)
        {
            return _umul128(a, b, high);
        }
        #define PHMAP_HAS_UMUL128 1
    #endif
#endif
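
// Illustrative usage (a sketch; MixExample and its constant are assumptions,
// not from this library): folding both halves of the full 128-bit product
// into a 64-bit hash mix, which is the typical consumer of umul128.
//
//     #ifdef PHMAP_HAS_UMUL128
//     inline uint64_t MixExample(uint64_t v) {
//         uint64_t hi;
//         uint64_t lo = umul128(v, UINT64_C(0x9E3779B97F4A7C15), &hi);
//         return hi ^ lo;  // both halves of the product contribute
//     }
//     #endif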

#if defined(__GNUC__)
    #pragma GCC diagnostic pop
#endif

#if defined(__GNUC__)
    // Cache line alignment
    #if defined(__i386__) || defined(__x86_64__)
        #define PHMAP_CACHELINE_SIZE 64
    #elif defined(__powerpc64__)
        #define PHMAP_CACHELINE_SIZE 128
    #elif defined(__aarch64__)
        // We would need to read special register ctr_el0 to find out L1 dcache size.
        // This value is a good estimate based on a real aarch64 machine.
        #define PHMAP_CACHELINE_SIZE 64
    #elif defined(__arm__)
        // Cache line sizes for ARM: These values are not strictly correct since
        // cache line sizes depend on implementations, not architectures.  There
        // are even implementations with cache line sizes configurable at boot
        // time.
        #if defined(__ARM_ARCH_5T__)
            #define PHMAP_CACHELINE_SIZE 32
        #elif defined(__ARM_ARCH_7A__)
            #define PHMAP_CACHELINE_SIZE 64
        #endif
    #endif

    #ifndef PHMAP_CACHELINE_SIZE
        // A reasonable default guess.  Note that overestimates tend to waste more
        // space, while underestimates tend to waste more time.
        #define PHMAP_CACHELINE_SIZE 64
    #endif

    #define PHMAP_CACHELINE_ALIGNED __attribute__((aligned(PHMAP_CACHELINE_SIZE)))
#elif defined(_MSC_VER)
    #define PHMAP_CACHELINE_SIZE 64
    #define PHMAP_CACHELINE_ALIGNED __declspec(align(PHMAP_CACHELINE_SIZE))
#else
    #define PHMAP_CACHELINE_SIZE 64
    #define PHMAP_CACHELINE_ALIGNED
#endif
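
// Illustrative usage (a hedged sketch; PaddedCounter is hypothetical): giving
// each heavily-written counter its own cache line to avoid false sharing
// between threads. Requires <atomic>; the assert holds on the GCC/Clang and
// MSVC branches above, not on the empty fallback.
//
//     struct PHMAP_CACHELINE_ALIGNED PaddedCounter {
//         std::atomic<uint64_t> value{0};   // sole occupant of its cache line
//     };
//     static_assert(alignof(PaddedCounter) == PHMAP_CACHELINE_SIZE, "padded");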


#if PHMAP_HAVE_BUILTIN(__builtin_expect) || \
    (defined(__GNUC__) && !defined(__clang__))
    #define PHMAP_PREDICT_FALSE(x) (__builtin_expect(x, 0))
    #define PHMAP_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
#else
    #define PHMAP_PREDICT_FALSE(x) (x)
    #define PHMAP_PREDICT_TRUE(x) (x)
#endif
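
// Illustrative usage (a sketch; HandleAllocationFailure is hypothetical):
// annotating the rare branch so the compiler lays out the common path as the
// fall-through.
//
//     if (PHMAP_PREDICT_FALSE(ptr == nullptr)) {
//         HandleAllocationFailure();
//     }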

// -----------------------------------------------------------------------------
// File: bits.h
// -----------------------------------------------------------------------------

#if defined(_MSC_VER)
    // We can achieve something similar to attribute((always_inline)) with MSVC by
    // using the __forceinline keyword, however this is not perfect. MSVC is
    // much less aggressive about inlining, even with the __forceinline keyword.
    #define PHMAP_BASE_INTERNAL_FORCEINLINE __forceinline
#else
    // Use default attribute inline.
    #define PHMAP_BASE_INTERNAL_FORCEINLINE inline PHMAP_ATTRIBUTE_ALWAYS_INLINE
#endif


namespace phmap {
namespace base_internal {

PHMAP_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) {
    int zeroes = 60;
    if (n >> 32) zeroes -= 32, n >>= 32;
    if (n >> 16) zeroes -= 16, n >>= 16;
    if (n >> 8) zeroes -= 8, n >>= 8;
    if (n >> 4) zeroes -= 4, n >>= 4;
    return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}
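
// How the string-literal lookup works (explanatory note, not in the original):
// after the shifts above, n is in [0, 15], and the literal (15 escapes plus
// the implicit '\0' terminator) maps n to the leading-zero count of that low
// nibble. Worked example: n = 1 takes no shifts, so zeroes stays 60, the table
// yields '\3', and the result is 63 -- the correct count for uint64_t{1}.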

PHMAP_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) {
#if defined(_MSC_VER) && defined(_M_X64)
    // MSVC does not have __builtin_clzll. Use _BitScanReverse64.
    unsigned long result = 0;  // NOLINT(runtime/int)
    if (_BitScanReverse64(&result, n)) {
        return (int)(63 - result);
    }
    return 64;
#elif defined(_MSC_VER) && !defined(__clang__)
    // MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse.
    unsigned long result = 0;  // NOLINT(runtime/int)
    if ((n >> 32) && _BitScanReverse(&result, (unsigned long)(n >> 32))) {
        return 31 - result;
    }
    if (_BitScanReverse(&result, (unsigned long)n)) {
        return 63 - result;
    }
    return 64;
#elif defined(__GNUC__) || defined(__clang__)
    // Use __builtin_clzll, which uses the following instructions:
    //  x86: bsr
    //  ARM64: clz
    //  PPC: cntlzd
    static_assert(sizeof(unsigned long long) == sizeof(n),  // NOLINT(runtime/int)
                  "__builtin_clzll does not take 64-bit arg");

    // Handle 0 as a special case because __builtin_clzll(0) is undefined.
    if (n == 0) {
        return 64;
    }
    return __builtin_clzll(n);
#else
    return CountLeadingZeros64Slow(n);
#endif
}

PHMAP_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32Slow(uint64_t n) {
    int zeroes = 28;
    if (n >> 16) zeroes -= 16, n >>= 16;
    if (n >> 8) zeroes -= 8, n >>= 8;
    if (n >> 4) zeroes -= 4, n >>= 4;
    return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}

PHMAP_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32(uint32_t n) {
#if defined(_MSC_VER) && !defined(__clang__)
    unsigned long result = 0;  // NOLINT(runtime/int)
    if (_BitScanReverse(&result, n)) {
        return (int)(31 - result);
    }
    return 32;
#elif defined(__GNUC__) || defined(__clang__)
    // Use __builtin_clz, which uses the following instructions:
    //  x86: bsr
    //  ARM64: clz
    //  PPC: cntlzd
    static_assert(sizeof(int) == sizeof(n),
                  "__builtin_clz does not take 32-bit arg");

    // Handle 0 as a special case because __builtin_clz(0) is undefined.
    if (n == 0) {
        return 32;
    }
    return __builtin_clz(n);
#else
    return CountLeadingZeros32Slow(n);
#endif
}

PHMAP_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64Slow(uint64_t n) {
    int c = 63;
    n &= ~n + 1;  // isolate the lowest set bit
    if (n & 0x00000000FFFFFFFF) c -= 32;
    if (n & 0x0000FFFF0000FFFF) c -= 16;
    if (n & 0x00FF00FF00FF00FF) c -= 8;
    if (n & 0x0F0F0F0F0F0F0F0F) c -= 4;
    if (n & 0x3333333333333333) c -= 2;
    if (n & 0x5555555555555555) c -= 1;
    return c;
}

PHMAP_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) {
#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64)
    unsigned long result = 0;  // NOLINT(runtime/int)
    _BitScanForward64(&result, n);
    return (int)result;
#elif defined(_MSC_VER) && !defined(__clang__)
    unsigned long result = 0;  // NOLINT(runtime/int)
    if (static_cast<uint32_t>(n) == 0) {
        _BitScanForward(&result, (unsigned long)(n >> 32));
        return result + 32;
    }
    _BitScanForward(&result, (unsigned long)n);
    return result;
#elif defined(__GNUC__) || defined(__clang__)
    static_assert(sizeof(unsigned long long) == sizeof(n),  // NOLINT(runtime/int)
                  "__builtin_ctzll does not take 64-bit arg");
    return __builtin_ctzll(n);
#else
    return CountTrailingZerosNonZero64Slow(n);
#endif
}

PHMAP_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32Slow(uint32_t n) {
    int c = 31;
    n &= ~n + 1;  // isolate the lowest set bit
    if (n & 0x0000FFFF) c -= 16;
    if (n & 0x00FF00FF) c -= 8;
    if (n & 0x0F0F0F0F) c -= 4;
    if (n & 0x33333333) c -= 2;
    if (n & 0x55555555) c -= 1;
    return c;
}

PHMAP_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32(uint32_t n) {
#if defined(_MSC_VER) && !defined(__clang__)
    unsigned long result = 0;  // NOLINT(runtime/int)
    _BitScanForward(&result, n);
    return (int)result;
#elif defined(__GNUC__) || defined(__clang__)
    static_assert(sizeof(int) == sizeof(n),
                  "__builtin_ctz does not take 32-bit arg");
    return __builtin_ctz(n);
#else
    return CountTrailingZerosNonZero32Slow(n);
#endif
}
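
// Illustrative usage (a hedged sketch; ForEachSetBit is hypothetical, not part
// of this header): iterating the set bits of a match mask, the classic
// SwissTable-style probing pattern these helpers exist for.
//
//     inline void ForEachSetBit(uint32_t mask) {
//         while (mask != 0) {
//             int idx = CountTrailingZerosNonZero32(mask);  // mask is non-zero here
//             // ... visit slot `idx` ...
//             mask &= mask - 1;  // clear the lowest set bit
//         }
//     }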

#undef PHMAP_BASE_INTERNAL_FORCEINLINE

}  // namespace base_internal
}  // namespace phmap

// -----------------------------------------------------------------------------
// File: endian.h
// -----------------------------------------------------------------------------

namespace phmap {

// Use compiler byte-swapping intrinsics if they are available.  32-bit
// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
// For simplicity, we enable them all only for GCC 4.8.0 or later.
#if defined(__clang__) || \
    (defined(__GNUC__) && \
     ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))

    inline uint64_t gbswap_64(uint64_t host_int) {
        return __builtin_bswap64(host_int);
    }
    inline uint32_t gbswap_32(uint32_t host_int) {
        return __builtin_bswap32(host_int);
    }
    inline uint16_t gbswap_16(uint16_t host_int) {
        return __builtin_bswap16(host_int);
    }

#elif defined(_MSC_VER)

    inline uint64_t gbswap_64(uint64_t host_int) {
        return _byteswap_uint64(host_int);
    }
    inline uint32_t gbswap_32(uint32_t host_int) {
        return _byteswap_ulong(host_int);
    }
    inline uint16_t gbswap_16(uint16_t host_int) {
        return _byteswap_ushort(host_int);
    }

#elif defined(__APPLE__)

    inline uint64_t gbswap_64(uint64_t host_int) { return OSSwapInt64(host_int); }
    inline uint32_t gbswap_32(uint32_t host_int) { return OSSwapInt32(host_int); }
    inline uint16_t gbswap_16(uint16_t host_int) { return OSSwapInt16(host_int); }

#else

    inline uint64_t gbswap_64(uint64_t host_int) {
#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
        // Adapted from /usr/include/byteswap.h.  Not available on Mac.
        if (__builtin_constant_p(host_int)) {
            return __bswap_constant_64(host_int);
        } else {
            uint64_t result;
            __asm__("bswap %0" : "=r"(result) : "0"(host_int));
            return result;
        }
#elif defined(__GLIBC__)
        return bswap_64(host_int);
#else
        return (((host_int & uint64_t{0xFF}) << 56) |
                ((host_int & uint64_t{0xFF00}) << 40) |
                ((host_int & uint64_t{0xFF0000}) << 24) |
                ((host_int & uint64_t{0xFF000000}) << 8) |
                ((host_int & uint64_t{0xFF00000000}) >> 8) |
                ((host_int & uint64_t{0xFF0000000000}) >> 24) |
                ((host_int & uint64_t{0xFF000000000000}) >> 40) |
                ((host_int & uint64_t{0xFF00000000000000}) >> 56));
#endif  // bswap_64
    }

    inline uint32_t gbswap_32(uint32_t host_int) {
#if defined(__GLIBC__)
        return bswap_32(host_int);
#else
        return (((host_int & uint32_t{0xFF}) << 24) |
                ((host_int & uint32_t{0xFF00}) << 8) |
                ((host_int & uint32_t{0xFF0000}) >> 8) |
                ((host_int & uint32_t{0xFF000000}) >> 24));
#endif
    }

    inline uint16_t gbswap_16(uint16_t host_int) {
#if defined(__GLIBC__)
        return bswap_16(host_int);
#else
        return (((host_int & uint16_t{0xFF}) << 8) |
                ((host_int & uint16_t{0xFF00}) >> 8));
#endif
    }

#endif  // intrinsics available
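
// Worked example (explanatory note, not from the original): on every branch
// above, gbswap_32(0x12345678) == 0x78563412 and gbswap_16(0xABCD) == 0xCDAB;
// applying the swap twice returns the original value.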

#ifdef PHMAP_IS_LITTLE_ENDIAN

    // Definitions for ntohl etc. that don't require us to include
    // netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
    // than just #defining them because in debug mode, gcc doesn't
    // correctly handle the (rather involved) definitions of bswap_32.
    // gcc guarantees that inline functions are as fast as macros, so
    // this isn't a performance hit.
    inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
    inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
    inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }

#elif defined PHMAP_IS_BIG_ENDIAN

    // These definitions are simpler on big-endian machines
    // These are functions instead of macros to avoid self-assignment warnings
    // on calls such as "i = ghtonl(i);".  This also provides type checking.
    inline uint16_t ghtons(uint16_t x) { return x; }
    inline uint32_t ghtonl(uint32_t x) { return x; }
    inline uint64_t ghtonll(uint64_t x) { return x; }

#else
    #error \
        "Unsupported byte order: Either PHMAP_IS_BIG_ENDIAN or " \
           "PHMAP_IS_LITTLE_ENDIAN must be defined"
#endif  // byte order

inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
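
// Illustrative usage (a sketch; assumes <cassert>): round-tripping a value
// through network byte order. gntohl(ghtonl(x)) == x on either endianness.
//
//     uint32_t host_value = 0x01020304;
//     uint32_t wire_value = ghtonl(host_value);  // big-endian on the wire
//     assert(gntohl(wire_value) == host_value);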

// Utilities to convert numbers between the current host's native byte
// order and little-endian byte order
//
// Load/Store methods are alignment safe
namespace little_endian {
// Conversion functions.
#ifdef PHMAP_IS_LITTLE_ENDIAN

    inline uint16_t FromHost16(uint16_t x) { return x; }
    inline uint16_t ToHost16(uint16_t x) { return x; }

    inline uint32_t FromHost32(uint32_t x) { return x; }
    inline uint32_t ToHost32(uint32_t x) { return x; }

    inline uint64_t FromHost64(uint64_t x) { return x; }
    inline uint64_t ToHost64(uint64_t x) { return x; }

    inline constexpr bool IsLittleEndian() { return true; }

#elif defined PHMAP_IS_BIG_ENDIAN

    inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
    inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }

    inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
    inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }

    inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
    inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }

    inline constexpr bool IsLittleEndian() { return false; }

#endif /* ENDIAN */

// Functions to do unaligned loads and stores in little-endian order.
inline uint16_t Load16(const void *p) {
  return ToHost16(PHMAP_INTERNAL_UNALIGNED_LOAD16(p));
}

inline void Store16(void *p, uint16_t v) {
  PHMAP_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}

inline uint32_t Load32(const void *p) {
  return ToHost32(PHMAP_INTERNAL_UNALIGNED_LOAD32(p));
}

inline void Store32(void *p, uint32_t v) {
  PHMAP_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}

inline uint64_t Load64(const void *p) {
  return ToHost64(PHMAP_INTERNAL_UNALIGNED_LOAD64(p));
}

inline void Store64(void *p, uint64_t v) {
  PHMAP_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}

}  // namespace little_endian
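
// Illustrative usage (a hedged sketch): consuming 8 bytes at a time from an
// arbitrary byte buffer, as a hash or parser loop would; little_endian::Load64
// yields the same value regardless of host byte order or pointer alignment.
//
//     unsigned char data[8] = {1, 0, 0, 0, 0, 0, 0, 0};
//     uint64_t chunk = phmap::little_endian::Load64(data);  // chunk == 1 everywhere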

// Utilities to convert numbers between the current host's native byte
// order and big-endian byte order (same as network byte order)
//
// Load/Store methods are alignment safe
namespace big_endian {
#ifdef PHMAP_IS_LITTLE_ENDIAN

    inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
    inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }

    inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
    inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }

    inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
    inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }

    inline constexpr bool IsLittleEndian() { return true; }

#elif defined PHMAP_IS_BIG_ENDIAN

    inline uint16_t FromHost16(uint16_t x) { return x; }
    inline uint16_t ToHost16(uint16_t x) { return x; }

    inline uint32_t FromHost32(uint32_t x) { return x; }
    inline uint32_t ToHost32(uint32_t x) { return x; }

    inline uint64_t FromHost64(uint64_t x) { return x; }
    inline uint64_t ToHost64(uint64_t x) { return x; }

    inline constexpr bool IsLittleEndian() { return false; }

#endif /* ENDIAN */

// Functions to do unaligned loads and stores in big-endian order.
inline uint16_t Load16(const void *p) {
  return ToHost16(PHMAP_INTERNAL_UNALIGNED_LOAD16(p));
}

inline void Store16(void *p, uint16_t v) {
  PHMAP_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}

inline uint32_t Load32(const void *p) {
  return ToHost32(PHMAP_INTERNAL_UNALIGNED_LOAD32(p));
}

inline void Store32(void *p, uint32_t v) {
  PHMAP_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}

inline uint64_t Load64(const void *p) {
  return ToHost64(PHMAP_INTERNAL_UNALIGNED_LOAD64(p));
}

inline void Store64(void *p, uint64_t v) {
  PHMAP_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}

}  // namespace big_endian

}  // namespace phmap

#ifdef _MSC_VER
     #pragma warning(pop)
#endif

#endif // phmap_bits_h_guard_