// Copyright 2005 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

//
// Utility functions that depend on bytesex. We define htonll and ntohll,
// as well as "Google" versions of all the standards: ghtonl, ghtons, and
// so on. These functions do exactly the same thing as their standard
// variants, but don't require including the dangerous netinet/in.h.
//
// Buffer routines will copy to and from buffers without causing
// a bus error when the architecture requires different byte alignments.
#ifndef S2_UTIL_ENDIAN_ENDIAN_H_
#define S2_UTIL_ENDIAN_ENDIAN_H_

#include <cassert>
#include <cstring>  // for memcpy in Load24/Store24
#include <type_traits>

#include "s2/base/integral_types.h"
#include "s2/base/logging.h"
#include "s2/base/port.h"
#include "s2/third_party/absl/base/casts.h"
#include "s2/third_party/absl/base/port.h"
#include "s2/third_party/absl/numeric/int128.h"

// Use compiler byte-swapping intrinsics if they are available.  32-bit
// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
// For simplicity, we enable them all only for GCC 4.8.0 or later.
#if defined(__clang__) || \
    (defined(__GNUC__) && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || \
                           __GNUC__ >= 5))

inline uint64 gbswap_64(uint64 host_int) {
  return __builtin_bswap64(host_int);
}
inline uint32 gbswap_32(uint32 host_int) {
  return __builtin_bswap32(host_int);
}
inline uint16 gbswap_16(uint16 host_int) {
  return __builtin_bswap16(host_int);
}

#else

inline uint64 gbswap_64(uint64 host_int) {
#if defined(__GNUC__) && defined(__x86_64__) && \
    !(defined(__APPLE__) && defined(__MACH__))
  // Adapted from /usr/include/byteswap.h.  Not available on Mac.
  if (__builtin_constant_p(host_int)) {
    return __bswap_constant_64(host_int);
  } else {
    uint64 result;
    __asm__("bswap %0" : "=r" (result) : "0" (host_int));
    return result;
  }
#elif defined(bswap_64)
  return bswap_64(host_int);
#else
  return static_cast<uint64>(bswap_32(static_cast<uint32>(host_int >> 32))) |
    (static_cast<uint64>(bswap_32(static_cast<uint32>(host_int))) << 32);
#endif  // bswap_64
}
inline uint32 gbswap_32(uint32 host_int) {
  return bswap_32(host_int);
}
inline uint16 gbswap_16(uint16 host_int) {
  return bswap_16(host_int);
}

#endif  // intrinsics available

inline absl::uint128 gbswap_128(absl::uint128 host_int) {
  return absl::MakeUint128(gbswap_64(absl::Uint128Low64(host_int)),
                           gbswap_64(absl::Uint128High64(host_int)));
}
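
// A quick illustration of the swap semantics (values chosen for readability,
// not taken from any caller):
//
//   uint32 y = gbswap_32(0x01020304);             // y == 0x04030201
//   uint64 z = gbswap_64(0x0102030405060708ULL);  // z == 0x0807060504030201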

#ifdef IS_LITTLE_ENDIAN

// Definitions for ntohl etc. that don't require us to include
// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
// than just #defining them because in debug mode, gcc doesn't
// correctly handle the (rather involved) definitions of bswap_32.
// gcc guarantees that inline functions are as fast as macros, so
// this isn't a performance hit.
inline uint16 ghtons(uint16 x) { return gbswap_16(x); }
inline uint32 ghtonl(uint32 x) { return gbswap_32(x); }
inline uint64 ghtonll(uint64 x) { return gbswap_64(x); }

#elif defined IS_BIG_ENDIAN

// These definitions are simpler on big-endian machines.
// These are functions instead of macros to avoid self-assignment warnings
// on calls such as "i = ghtonl(i);".  This also provides type checking.
inline uint16 ghtons(uint16 x) { return x; }
inline uint32 ghtonl(uint32 x) { return x; }
inline uint64 ghtonll(uint64 x) { return x; }

#else
#error "Unsupported bytesex: Either IS_BIG_ENDIAN or IS_LITTLE_ENDIAN must be defined"  // NOLINT
#endif  // bytesex

#ifndef htonll
// With the rise of 64-bit, some systems are beginning to define this.
#define htonll(x) ghtonll(x)
#endif  // htonll

// ntoh* and hton* are the same thing for any size and bytesex,
// since the function is an involution, i.e., its own inverse.
inline uint16 gntohs(uint16 x) { return ghtons(x); }
inline uint32 gntohl(uint32 x) { return ghtonl(x); }
inline uint64 gntohll(uint64 x) { return ghtonll(x); }

#ifndef ntohll
#define ntohll(x) htonll(x)
#endif  // ntohll

// We provide unified FromHost and ToHost APIs for all integral types and the
// float and double types. If variable v's type is known to be one of these
// types, the client can simply call the following functions without worrying
// about the return type:
//     LittleEndian::FromHost(v), or BigEndian::FromHost(v)
//     LittleEndian::ToHost(v), or BigEndian::ToHost(v)
// These unified FromHost and ToHost APIs are useful inside a template when
// the type of v is a template parameter.
//
// In order to unify all "IntType FromHostxx(ValueType)" and "IntType
// ToHostxx(ValueType)" APIs, we use the following trait class to
// automatically find the corresponding IntType given a ValueType, where
// IntType is an unsigned integer type with the same size as ValueType. The
// supported ValueTypes are uint8, uint16, uint32, uint64, int8, int16,
// int32, int64, bool, float, double.
//
// template <class ValueType>
// struct tofromhost_value_type_traits {
//   typedef ValueType value_type;
//   typedef IntType int_type;
// }
//
// We don't provide a default implementation for this trait struct, so if
// ValueType is not supported by the FromHost and ToHost APIs, it will give
// a compile-time error.
template <class ValueType>
struct tofromhost_value_type_traits;
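
// A minimal usage sketch of the unified API (the function name and buffer
// handling here are illustrative, not part of this header):
//
//   template <typename T>
//   void AppendLittleEndian(T v, std::string* out) {
//     auto bits = LittleEndian::FromHost(v);  // an unsigned integer type
//     out->append(reinterpret_cast<const char*>(&bits), sizeof(bits));
//   }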

// General byte order converter class template. It provides a common
// implementation for LittleEndian::FromHost(ValueType),
// BigEndian::FromHost(ValueType), LittleEndian::ToHost(ValueType), and
// BigEndian::ToHost(ValueType).
template <class EndianClass, typename ValueType>
class GeneralFormatConverter {
 public:
  static typename tofromhost_value_type_traits<ValueType>::int_type FromHost(
      ValueType v);
  static typename tofromhost_value_type_traits<ValueType>::int_type ToHost(
      ValueType v);
};

// Utilities to convert numbers between the current host's native byte
// order and little-endian byte order.
//
// Load/Store methods are alignment safe.
class LittleEndian {
 public:
  // Conversion functions.
#ifdef IS_LITTLE_ENDIAN

  static uint16 FromHost16(uint16 x) { return x; }
  static uint16 ToHost16(uint16 x) { return x; }

  static uint32 FromHost32(uint32 x) { return x; }
  static uint32 ToHost32(uint32 x) { return x; }

  static uint64 FromHost64(uint64 x) { return x; }
  static uint64 ToHost64(uint64 x) { return x; }

  static absl::uint128 FromHost128(absl::uint128 x) { return x; }
  static absl::uint128 ToHost128(absl::uint128 x) { return x; }

  static constexpr bool IsLittleEndian() { return true; }

#elif defined IS_BIG_ENDIAN

  static uint16 FromHost16(uint16 x) { return gbswap_16(x); }
  static uint16 ToHost16(uint16 x) { return gbswap_16(x); }

  static uint32 FromHost32(uint32 x) { return gbswap_32(x); }
  static uint32 ToHost32(uint32 x) { return gbswap_32(x); }

  static uint64 FromHost64(uint64 x) { return gbswap_64(x); }
  static uint64 ToHost64(uint64 x) { return gbswap_64(x); }

  static absl::uint128 FromHost128(absl::uint128 x) { return gbswap_128(x); }
  static absl::uint128 ToHost128(absl::uint128 x) { return gbswap_128(x); }

  static constexpr bool IsLittleEndian() { return false; }

#endif /* ENDIAN */

  // Unified LittleEndian::FromHost(ValueType v) API.
  template <class ValueType>
  static typename tofromhost_value_type_traits<ValueType>::int_type FromHost(
      ValueType v) {
    return GeneralFormatConverter<LittleEndian, ValueType>::FromHost(v);
  }

  // Unified LittleEndian::ToHost(ValueType v) API.
  template <class ValueType>
  static typename tofromhost_value_type_traits<ValueType>::value_type ToHost(
      ValueType v) {
    return GeneralFormatConverter<LittleEndian, ValueType>::ToHost(v);
  }

  // Functions to do unaligned loads and stores in little-endian order.
  static uint16 Load16(const void *p) {
    return ToHost16(UNALIGNED_LOAD16(p));
  }

  static void Store16(void *p, uint16 v) {
    UNALIGNED_STORE16(p, FromHost16(v));
  }

  static uint32 Load24(const void* p) {
#ifdef IS_LITTLE_ENDIAN
    uint32 result = 0;
    memcpy(&result, p, 3);
    return result;
#else
    const uint8* data = reinterpret_cast<const uint8*>(p);
    return Load16(data) + (data[2] << 16);
#endif
  }

  static void Store24(void* p, uint32 v) {
#ifdef IS_LITTLE_ENDIAN
    memcpy(p, &v, 3);
#else
    uint8* data = reinterpret_cast<uint8*>(p);
    data[0] = v & 0xFF;
    data[1] = (v >> 8) & 0xFF;
    data[2] = (v >> 16) & 0xFF;
#endif
  }

  static uint32 Load32(const void *p) {
    return ToHost32(UNALIGNED_LOAD32(p));
  }

  static void Store32(void *p, uint32 v) {
    UNALIGNED_STORE32(p, FromHost32(v));
  }

  static uint64 Load64(const void *p) {
    return ToHost64(UNALIGNED_LOAD64(p));
  }

  // Build a uint64 from 1-8 bytes.
  // The 8 * len least significant bits are loaded from the memory in
  // little-endian order. The 64 - 8 * len most significant bits are
  // all set to 0.
  // In latex-friendly words, this function returns:
  //     $\sum_{i=0}^{len-1} p[i] 256^{i}$, where p[i] is unsigned.
  //
  // This function is equivalent to:
  // uint64 val = 0;
  // memcpy(&val, p, len);
  // return ToHost64(val);
  // TODO(jyrki): write a small benchmark and benchmark the speed
  // of a memcpy based approach.
  //
  // For speed reasons this function does not work for len == 0.
  // The caller needs to guarantee that 1 <= len <= 8.
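  //
  // For instance, given bytes {0x01, 0x02, 0x03} in a hypothetical buffer
  // buf, Load64VariableLength(buf, 3) returns 0x030201 on any host.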
  static uint64 Load64VariableLength(const void* const p, int len) {
    assert(len >= 1 && len <= 8);
    const char* const buf = static_cast<const char*>(p);
    uint64 val = 0;
    --len;
    do {
      // Cast the byte through uint8 so that a negative char does not
      // sign-extend into the high bits of val.
      val = (val << 8) | static_cast<uint8>(buf[len]);
      // (--len >= 0) is about 10 % faster than (len--) in some benchmarks.
    } while (--len >= 0);
    // No ToHost64(...) needed. The bytes are accessed in little-endian manner
    // on every architecture.
    return val;
  }

  static void Store64(void *p, uint64 v) {
    UNALIGNED_STORE64(p, FromHost64(v));
  }

  static absl::uint128 Load128(const void* p) {
    return absl::MakeUint128(
        ToHost64(UNALIGNED_LOAD64(reinterpret_cast<const uint64*>(p) + 1)),
        ToHost64(UNALIGNED_LOAD64(p)));
  }

  static void Store128(void* p, const absl::uint128 v) {
    UNALIGNED_STORE64(p, FromHost64(absl::Uint128Low64(v)));
    UNALIGNED_STORE64(reinterpret_cast<uint64*>(p) + 1,
                      FromHost64(absl::Uint128High64(v)));
  }

  // Build a uint128 from 1-16 bytes.
  // The 8 * len least significant bits are loaded from the memory in
  // little-endian order. The 128 - 8 * len most significant bits are
  // all set to 0.
  static absl::uint128 Load128VariableLength(const void* p, int len) {
    if (len <= 8) {
      return absl::uint128(Load64VariableLength(p, len));
    } else {
      return absl::MakeUint128(
          Load64VariableLength(static_cast<const char*>(p) + 8, len - 8),
          Load64(p));
    }
  }

  // Load & Store in machine's word size.
  static uword_t LoadUnsignedWord(const void *p) {
    if (sizeof(uword_t) == 8)
      return Load64(p);
    else
      return Load32(p);
  }

  static void StoreUnsignedWord(void *p, uword_t v) {
    if (sizeof(v) == 8)
      Store64(p, v);
    else
      Store32(p, v);
  }

  // Unified LittleEndian::Load/Store<T> API.

  // Returns the T value encoded by the leading bytes of 'p', interpreted
  // according to the format specified below. 'p' has no alignment
  // restrictions.
  //
  // Type              Format
  // ----------------  -------------------------------------------------------
  // uint{8,16,32,64}  Little-endian binary representation.
  // int{8,16,32,64}   Little-endian two's-complement binary representation.
  // float,double      Little-endian IEEE-754 format.
  // char              The raw byte.
  // bool              A byte. 0 maps to false; all other values map to true.
  template<typename T>
  static T Load(const char* p);

  // Encodes 'value' in the format corresponding to T. Supported types are
  // described in Load<T>(). 'p' has no alignment restrictions. In-place Store
  // is safe (that is, it is safe to call
  // Store(x, reinterpret_cast<char*>(&x))).
  template<typename T>
  static void Store(T value, char* p);
};
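
// A short roundtrip sketch of the unified Load/Store API (buffer and value
// are illustrative, not taken from any caller):
//
//   char buf[sizeof(double)];
//   LittleEndian::Store<double>(3.25, buf);
//   double d = LittleEndian::Load<double>(buf);  // d == 3.25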

// Utilities to convert numbers between the current host's native byte
// order and big-endian byte order (same as network byte order).
//
// Load/Store methods are alignment safe.
class BigEndian {
 public:
#ifdef IS_LITTLE_ENDIAN

  static uint16 FromHost16(uint16 x) { return gbswap_16(x); }
  static uint16 ToHost16(uint16 x) { return gbswap_16(x); }

  static uint32 FromHost32(uint32 x) { return gbswap_32(x); }
  static uint32 ToHost32(uint32 x) { return gbswap_32(x); }

  static uint64 FromHost64(uint64 x) { return gbswap_64(x); }
  static uint64 ToHost64(uint64 x) { return gbswap_64(x); }

  static absl::uint128 FromHost128(absl::uint128 x) { return gbswap_128(x); }
  static absl::uint128 ToHost128(absl::uint128 x) { return gbswap_128(x); }

  // Reports the byte order of the host, not of this class.
  static constexpr bool IsLittleEndian() { return true; }

#elif defined IS_BIG_ENDIAN

  static uint16 FromHost16(uint16 x) { return x; }
  static uint16 ToHost16(uint16 x) { return x; }

  static uint32 FromHost32(uint32 x) { return x; }
  static uint32 ToHost32(uint32 x) { return x; }

  static uint64 FromHost64(uint64 x) { return x; }
  static uint64 ToHost64(uint64 x) { return x; }

  static absl::uint128 FromHost128(absl::uint128 x) { return x; }
  static absl::uint128 ToHost128(absl::uint128 x) { return x; }

  static constexpr bool IsLittleEndian() { return false; }

#endif /* ENDIAN */

  // Unified BigEndian::FromHost(ValueType v) API.
  template <class ValueType>
  static typename tofromhost_value_type_traits<ValueType>::int_type FromHost(
      ValueType v) {
    return GeneralFormatConverter<BigEndian, ValueType>::FromHost(v);
  }

  // Unified BigEndian::ToHost(ValueType v) API.
  template <class ValueType>
  static typename tofromhost_value_type_traits<ValueType>::value_type ToHost(
      ValueType v) {
    return GeneralFormatConverter<BigEndian, ValueType>::ToHost(v);
  }

  // Functions to do unaligned loads and stores in big-endian order.
  static uint16 Load16(const void *p) {
    return ToHost16(UNALIGNED_LOAD16(p));
  }

  static void Store16(void *p, uint16 v) {
    UNALIGNED_STORE16(p, FromHost16(v));
  }

  static uint32 Load24(const void* p) {
    const uint8* data = reinterpret_cast<const uint8*>(p);
    return (data[0] << 16) + Load16(data + 1);
  }

  static void Store24(void* p, uint32 v) {
    uint8* data = reinterpret_cast<uint8*>(p);
    Store16(data + 1, static_cast<uint16>(v));
    *data = static_cast<uint8>(v >> 16);
  }

  static uint32 Load32(const void *p) {
    return ToHost32(UNALIGNED_LOAD32(p));
  }

  static void Store32(void *p, uint32 v) {
    UNALIGNED_STORE32(p, FromHost32(v));
  }

  static uint64 Load64(const void *p) {
    return ToHost64(UNALIGNED_LOAD64(p));
  }

  // Semantically build a uint64 from 1-8 bytes.
  // The 8 * len least significant bits are loaded from the memory in
  // big-endian order. The 64 - 8 * len most significant bits are
  // all set to 0.
  // In latex-friendly words, this function returns:
  //     $\sum_{i=0}^{len-1} p[i] 256^{len-1-i}$, where p[i] is unsigned.
  //
  // This function is equivalent to:
  // uint64 val = 0;
  // memcpy(&val, p, len);
  // return ToHost64(val) >> (8 * (8 - len));
  // TODO(jyrki): write a small benchmark and benchmark the speed
  // of a memcpy based approach.
  //
  // For speed reasons this function does not work for len == 0.
  // The caller needs to guarantee that 1 <= len <= 8.
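  //
  // For instance, given bytes {0x01, 0x02, 0x03} in a hypothetical buffer
  // buf, Load64VariableLength(buf, 3) returns 0x010203 on any host.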
  static uint64 Load64VariableLength(const void* const p, int len) {
    assert(len >= 1 && len <= 8);
    const char* buf = static_cast<const char*>(p);
    uint64 val = 0;
    do {
      // Cast the byte through uint8 so that a negative char does not
      // sign-extend into the high bits of val.
      val = (val << 8) | static_cast<uint8>(*buf);
      ++buf;
    } while (--len > 0);
    return val;
  }

  static void Store64(void *p, uint64 v) {
    UNALIGNED_STORE64(p, FromHost64(v));
  }

  static absl::uint128 Load128(const void* p) {
    return absl::MakeUint128(
        ToHost64(UNALIGNED_LOAD64(p)),
        ToHost64(UNALIGNED_LOAD64(reinterpret_cast<const uint64*>(p) + 1)));
  }

  static void Store128(void* p, const absl::uint128 v) {
    UNALIGNED_STORE64(p, FromHost64(absl::Uint128High64(v)));
    UNALIGNED_STORE64(reinterpret_cast<uint64*>(p) + 1,
                      FromHost64(absl::Uint128Low64(v)));
  }

  // Build a uint128 from 1-16 bytes.
  // The 8 * len least significant bits are loaded from the memory in
  // big-endian order. The 128 - 8 * len most significant bits are
  // all set to 0.
  static absl::uint128 Load128VariableLength(const void* p, int len) {
    if (len <= 8) {
      return absl::uint128(
          Load64VariableLength(static_cast<const char*>(p), len));
    } else if (len < 16) {
      return absl::MakeUint128(Load64VariableLength(p, len - 8),
                               Load64(static_cast<const char*>(p) + len - 8));
    } else {
      return absl::MakeUint128(Load64(static_cast<const char*>(p)),
                               Load64(static_cast<const char*>(p) + 8));
    }
  }

  // Load & Store in machine's word size.
  static uword_t LoadUnsignedWord(const void *p) {
    if (sizeof(uword_t) == 8)
      return Load64(p);
    else
      return Load32(p);
  }

  static void StoreUnsignedWord(void *p, uword_t v) {
    if (sizeof(uword_t) == 8)
      Store64(p, v);
    else
      Store32(p, v);
  }

  // Unified BigEndian::Load/Store<T> API.

  // Returns the T value encoded by the leading bytes of 'p', interpreted
  // according to the format specified below. 'p' has no alignment
  // restrictions.
  //
  // Type              Format
  // ----------------  -------------------------------------------------------
  // uint{8,16,32,64}  Big-endian binary representation.
  // int{8,16,32,64}   Big-endian two's-complement binary representation.
  // float,double      Big-endian IEEE-754 format.
  // char              The raw byte.
  // bool              A byte. 0 maps to false; all other values map to true.
  template<typename T>
  static T Load(const char* p);

  // Encodes 'value' in the format corresponding to T. Supported types are
  // described in Load<T>(). 'p' has no alignment restrictions. In-place Store
  // is safe (that is, it is safe to call
  // Store(x, reinterpret_cast<char*>(&x))).
  template<typename T>
  static void Store(T value, char* p);
};  // BigEndian

// Network byte order is big-endian.
typedef BigEndian NetworkByteOrder;
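
// For example (illustrative value), NetworkByteOrder::FromHost32(0x01020304)
// produces the same byte pattern as ghtonl(0x01020304), since network byte
// order is big-endian.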

//////////////////////////////////////////////////////////////////////
// Implementation details: Clients can stop reading here.
//
// Define the ValueType->IntType mapping for the unified
// "IntType FromHost(ValueType)" API. The mapping is implemented via the
// tofromhost_value_type_traits trait struct. Every legal ValueType has its
// own specialization. There is no default body for this trait struct, so any
// type that is not supported by the unified FromHost API will trigger a
// compile-time error.
#define FROMHOST_TYPE_MAP(ITYPE, VTYPE)        \
  template <>                                  \
  struct tofromhost_value_type_traits<VTYPE> { \
    typedef VTYPE value_type;                  \
    typedef ITYPE int_type;                    \
  };

FROMHOST_TYPE_MAP(uint8, uint8);
FROMHOST_TYPE_MAP(uint8, int8);
FROMHOST_TYPE_MAP(uint16, uint16);
FROMHOST_TYPE_MAP(uint16, int16);
FROMHOST_TYPE_MAP(uint32, uint32);
FROMHOST_TYPE_MAP(uint32, int32);
FROMHOST_TYPE_MAP(uint64, uint64);
FROMHOST_TYPE_MAP(uint64, int64);
FROMHOST_TYPE_MAP(uint32, float);
FROMHOST_TYPE_MAP(uint64, double);
FROMHOST_TYPE_MAP(uint8, bool);
FROMHOST_TYPE_MAP(absl::uint128, absl::uint128);
#undef FROMHOST_TYPE_MAP
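
// Compile-time sanity checks of the mapping above (illustrative additions,
// not part of the original API; they can be removed without effect):
static_assert(std::is_same<tofromhost_value_type_traits<float>::int_type,
                           uint32>::value,
              "float should map to uint32");
static_assert(std::is_same<tofromhost_value_type_traits<double>::int_type,
                           uint64>::value,
              "double should map to uint64");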

// Default implementation for the unified FromHost(ValueType) API, which
// handles all integral types (ValueType is one of uint8, int8, uint16, int16,
// uint32, int32, uint64, int64). The compiler will remove the switch case
// branches and unnecessary static_casts when the template is expanded.
template <class EndianClass, typename ValueType>
typename tofromhost_value_type_traits<ValueType>::int_type
GeneralFormatConverter<EndianClass, ValueType>::FromHost(ValueType v) {
  switch (sizeof(ValueType)) {
    case 1:
      return static_cast<uint8>(v);
      break;
    case 2:
      return EndianClass::FromHost16(static_cast<uint16>(v));
      break;
    case 4:
      return EndianClass::FromHost32(static_cast<uint32>(v));
      break;
    case 8:
      return EndianClass::FromHost64(static_cast<uint64>(v));
      break;
    default:
      S2_LOG(FATAL) << "Unexpected value size: " << sizeof(ValueType);
  }
}

// Default implementation for the unified ToHost(ValueType) API, which handles
// all integral types (ValueType is one of uint8, int8, uint16, int16, uint32,
// int32, uint64, int64). The compiler will remove the switch case branches
// and unnecessary static_casts when the template is expanded.
template <class EndianClass, typename ValueType>
typename tofromhost_value_type_traits<ValueType>::int_type
GeneralFormatConverter<EndianClass, ValueType>::ToHost(ValueType v) {
  switch (sizeof(ValueType)) {
    case 1:
      return static_cast<uint8>(v);
      break;
    case 2:
      return EndianClass::ToHost16(static_cast<uint16>(v));
      break;
    case 4:
      return EndianClass::ToHost32(static_cast<uint32>(v));
      break;
    case 8:
      return EndianClass::ToHost64(static_cast<uint64>(v));
      break;
    default:
      S2_LOG(FATAL) << "Unexpected value size: " << sizeof(ValueType);
  }
}

// Specialization of the unified FromHost(ValueType) API, which handles
// float types (ValueType is float).
template <class EndianClass>
class GeneralFormatConverter<EndianClass, float> {
 public:
  static typename tofromhost_value_type_traits<float>::int_type FromHost(
      float v) {
    return EndianClass::FromHost32(absl::bit_cast<uint32>(v));
  }
  // Note: ToHost returns the value_type (float). Declaring int_type here
  // would numerically convert the reconstructed float to an integer.
  static typename tofromhost_value_type_traits<float>::value_type ToHost(
      float v) {
    return absl::bit_cast<float>(
        EndianClass::ToHost32(absl::bit_cast<uint32>(v)));
  }
};

// Specialization of the unified FromHost(ValueType) API, which handles
// double types (ValueType is double).
template <class EndianClass>
class GeneralFormatConverter<EndianClass, double> {
 public:
  static typename tofromhost_value_type_traits<double>::int_type FromHost(
      double v) {
    return EndianClass::FromHost64(absl::bit_cast<uint64>(v));
  }
  // Note: as with float, ToHost returns the value_type (double).
  static typename tofromhost_value_type_traits<double>::value_type ToHost(
      double v) {
    return absl::bit_cast<double>(
        EndianClass::ToHost64(absl::bit_cast<uint64>(v)));
  }
};

// Specialization of the unified FromHost(ValueType) API, which handles
// uint128 types (ValueType is uint128).
template <class EndianClass>
class GeneralFormatConverter<EndianClass, absl::uint128> {
 public:
  static typename tofromhost_value_type_traits<absl::uint128>::int_type
  FromHost(absl::uint128 v) {
    return EndianClass::FromHost128(v);
  }
  static typename tofromhost_value_type_traits<absl::uint128>::int_type ToHost(
      absl::uint128 v) {
    return EndianClass::ToHost128(v);
  }
};

namespace endian_internal {
// Integer helper methods for the unified Load/Store APIs.

// Which branch of the 'case' to use is decided at compile time, so despite the
// apparent size of this function, it compiles into efficient code.
template<typename EndianClass, typename T>
inline T LoadInteger(const char* p) {
  static_assert(sizeof(T) <= 8 && std::is_integral<T>::value,
                "T needs to be an integral type with size <= 8.");
  switch (sizeof(T)) {
    case 1: return *reinterpret_cast<const T*>(p);
    case 2: return EndianClass::ToHost16(UNALIGNED_LOAD16(p));
    case 4: return EndianClass::ToHost32(UNALIGNED_LOAD32(p));
    case 8: return EndianClass::ToHost64(UNALIGNED_LOAD64(p));
    default: {
      S2_LOG(FATAL) << "Not reached!";
      return 0;
    }
  }
}

// Which branch of the 'case' to use is decided at compile time, so despite the
// apparent size of this function, it compiles into efficient code.
template<typename EndianClass, typename T>
inline void StoreInteger(T value, char* p) {
  static_assert(sizeof(T) <= 8 && std::is_integral<T>::value,
                "T needs to be an integral type with size <= 8.");
  switch (sizeof(T)) {
    case 1: *reinterpret_cast<T*>(p) = value; break;
    case 2: UNALIGNED_STORE16(p, EndianClass::FromHost16(value)); break;
    case 4: UNALIGNED_STORE32(p, EndianClass::FromHost32(value)); break;
    case 8: UNALIGNED_STORE64(p, EndianClass::FromHost64(value)); break;
    default: {
      S2_LOG(FATAL) << "Not reached!";
    }
  }
}

// Floating point helper methods for the unified Load/Store APIs.

template<typename EndianClass>
inline float LoadFloat(const char* p) {
  return absl::bit_cast<float>(EndianClass::ToHost32(UNALIGNED_LOAD32(p)));
}

template<typename EndianClass>
inline void StoreFloat(float value, char* p) {
  UNALIGNED_STORE32(p, EndianClass::FromHost32(absl::bit_cast<uint32>(value)));
}

template<typename EndianClass>
inline double LoadDouble(const char* p) {
  return absl::bit_cast<double>(EndianClass::ToHost64(UNALIGNED_LOAD64(p)));
}

template<typename EndianClass>
inline void StoreDouble(double value, char* p) {
  UNALIGNED_STORE64(p, EndianClass::FromHost64(absl::bit_cast<uint64>(value)));
}

}  // namespace endian_internal

// Load/Store for integral values.

template<typename T>
inline T LittleEndian::Load(const char* p) {
  return endian_internal::LoadInteger<LittleEndian, T>(p);
}

template<typename T>
inline void LittleEndian::Store(T value, char* p) {
  endian_internal::StoreInteger<LittleEndian, T>(value, p);
}

template<typename T>
inline T BigEndian::Load(const char* p) {
  return endian_internal::LoadInteger<BigEndian, T>(p);
}

template<typename T>
inline void BigEndian::Store(T value, char* p) {
  endian_internal::StoreInteger<BigEndian, T>(value, p);
}

// Load/Store for bool. Sanitizes bool on the way in for safety.

template<>
inline bool LittleEndian::Load<bool>(const char* p) {
  static_assert(sizeof(bool) == 1, "Unexpected sizeof(bool)");
  return *p != 0;
}

template<>
inline void LittleEndian::Store<bool>(bool value, char* p) {
  static_assert(sizeof(bool) == 1, "Unexpected sizeof(bool)");
  *p = value ? 1 : 0;
}

template<>
inline bool BigEndian::Load<bool>(const char* p) {
  static_assert(sizeof(bool) == 1, "Unexpected sizeof(bool)");
  return *p != 0;
}

template<>
inline void BigEndian::Store<bool>(bool value, char* p) {
  static_assert(sizeof(bool) == 1, "Unexpected sizeof(bool)");
  *p = value ? 1 : 0;
}

// Load/Store for float.

template<>
inline float LittleEndian::Load<float>(const char* p) {
  return endian_internal::LoadFloat<LittleEndian>(p);
}

template<>
inline void LittleEndian::Store<float>(float value, char* p) {
  endian_internal::StoreFloat<LittleEndian>(value, p);
}

template<>
inline float BigEndian::Load<float>(const char* p) {
  return endian_internal::LoadFloat<BigEndian>(p);
}

template<>
inline void BigEndian::Store<float>(float value, char* p) {
  endian_internal::StoreFloat<BigEndian>(value, p);
}

// Load/Store for double.

template<>
inline double LittleEndian::Load<double>(const char* p) {
  return endian_internal::LoadDouble<LittleEndian>(p);
}

template<>
inline void LittleEndian::Store<double>(double value, char* p) {
  endian_internal::StoreDouble<LittleEndian>(value, p);
}

template<>
inline double BigEndian::Load<double>(const char* p) {
  return endian_internal::LoadDouble<BigEndian>(p);
}

template<>
inline void BigEndian::Store<double>(double value, char* p) {
  endian_internal::StoreDouble<BigEndian>(value, p);
}

// Load/Store for uint128.

template <>
inline absl::uint128 LittleEndian::Load<absl::uint128>(const char* p) {
  return LittleEndian::Load128(p);
}

template <>
inline void LittleEndian::Store<absl::uint128>(absl::uint128 value, char* p) {
  LittleEndian::Store128(p, value);
}

template <>
inline absl::uint128 BigEndian::Load<absl::uint128>(const char* p) {
  return BigEndian::Load128(p);
}

template <>
inline void BigEndian::Store<absl::uint128>(absl::uint128 value, char* p) {
  BigEndian::Store128(p, value);
}

#endif  // S2_UTIL_ENDIAN_ENDIAN_H_