// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
#define ABSL_BASE_INTERNAL_ENDIAN_H_

// The following guarantees declaration of the byte swap functions
#ifdef _MSC_VER
#include <stdlib.h>  // NOLINT(build/include)
#elif defined(__FreeBSD__)
#include <sys/endian.h>
#elif defined(__GLIBC__)
#include <byteswap.h>  // IWYU pragma: export
#endif

#include <cstdint>
#include "absl/base/config.h"
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/port.h"

namespace absl {
ABSL_NAMESPACE_BEGIN

// Use compiler byte-swapping intrinsics if they are available.  32-bit
// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
// For simplicity, we enable them all only for GCC 4.8.0 or later.
#if defined(__clang__) || \
    (defined(__GNUC__) && \
     ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
inline uint64_t gbswap_64(uint64_t host_int) {
  return __builtin_bswap64(host_int);
}
inline uint32_t gbswap_32(uint32_t host_int) {
  return __builtin_bswap32(host_int);
}
inline uint16_t gbswap_16(uint16_t host_int) {
  return __builtin_bswap16(host_int);
}

#elif defined(_MSC_VER)
inline uint64_t gbswap_64(uint64_t host_int) {
  return _byteswap_uint64(host_int);
}
inline uint32_t gbswap_32(uint32_t host_int) {
  return _byteswap_ulong(host_int);
}
inline uint16_t gbswap_16(uint16_t host_int) {
  return _byteswap_ushort(host_int);
}

#else
inline uint64_t gbswap_64(uint64_t host_int) {
#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
  // Adapted from /usr/include/byteswap.h.  Not available on Mac.
  if (__builtin_constant_p(host_int)) {
    return __bswap_constant_64(host_int);
  } else {
    uint64_t result;
    __asm__("bswap %0" : "=r"(result) : "0"(host_int));
    return result;
  }
#elif defined(__GLIBC__)
  return bswap_64(host_int);
#else
  return (((host_int & uint64_t{0xFF}) << 56) |
          ((host_int & uint64_t{0xFF00}) << 40) |
          ((host_int & uint64_t{0xFF0000}) << 24) |
          ((host_int & uint64_t{0xFF000000}) << 8) |
          ((host_int & uint64_t{0xFF00000000}) >> 8) |
          ((host_int & uint64_t{0xFF0000000000}) >> 24) |
          ((host_int & uint64_t{0xFF000000000000}) >> 40) |
          ((host_int & uint64_t{0xFF00000000000000}) >> 56));
#endif  // bswap_64
}

inline uint32_t gbswap_32(uint32_t host_int) {
#if defined(__GLIBC__)
  return bswap_32(host_int);
#else
  return (((host_int & uint32_t{0xFF}) << 24) |
          ((host_int & uint32_t{0xFF00}) << 8) |
          ((host_int & uint32_t{0xFF0000}) >> 8) |
          ((host_int & uint32_t{0xFF000000}) >> 24));
#endif
}

inline uint16_t gbswap_16(uint16_t host_int) {
#if defined(__GLIBC__)
  return bswap_16(host_int);
#else
  return (((host_int & uint16_t{0xFF}) << 8) |
          ((host_int & uint16_t{0xFF00}) >> 8));
#endif
}

#endif  // intrinsics available
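
// For example (illustrative values only; every variant above computes the
// same full byte reversal):
//
//   gbswap_16(uint16_t{0x1234}) == 0x3412
//   gbswap_32(uint32_t{0x12345678}) == 0x78563412
//   gbswap_64(uint64_t{0x0102030405060708}) == 0x0807060504030201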

#ifdef ABSL_IS_LITTLE_ENDIAN

// Definitions for ntohl etc. that don't require us to include
// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
// than just #defining them because in debug mode, gcc doesn't
// correctly handle the (rather involved) definitions of bswap_32.
// gcc guarantees that inline functions are as fast as macros, so
// this isn't a performance hit.
inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }

#elif defined ABSL_IS_BIG_ENDIAN

// These definitions are simpler on big-endian machines.
// These are functions instead of macros to avoid self-assignment warnings
// on calls such as "i = ghtonl(i);".  This also provides type checking.
inline uint16_t ghtons(uint16_t x) { return x; }
inline uint32_t ghtonl(uint32_t x) { return x; }
inline uint64_t ghtonll(uint64_t x) { return x; }

#else
#error \
    "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
    "ABSL_IS_LITTLE_ENDIAN must be defined"
#endif  // byte order

inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
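
// Example (an illustrative sketch): on a little-endian host ghtonl()
// byte-swaps so that the most significant byte is stored first in memory,
// matching network byte order; on a big-endian host it is the identity.
//
//   uint32_t net = ghtonl(uint32_t{0x01020304});
//   // Little-endian host: net == 0x04030201; big-endian host: net == 0x01020304.
//   uint32_t host = gntohl(net);  // host == 0x01020304 on either.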

// Utilities to convert numbers between the current host's native byte
// order and little-endian byte order
//
// Load/Store methods are alignment safe
namespace little_endian {
// Conversion functions.
#ifdef ABSL_IS_LITTLE_ENDIAN

inline uint16_t FromHost16(uint16_t x) { return x; }
inline uint16_t ToHost16(uint16_t x) { return x; }

inline uint32_t FromHost32(uint32_t x) { return x; }
inline uint32_t ToHost32(uint32_t x) { return x; }

inline uint64_t FromHost64(uint64_t x) { return x; }
inline uint64_t ToHost64(uint64_t x) { return x; }

inline constexpr bool IsLittleEndian() { return true; }

#elif defined ABSL_IS_BIG_ENDIAN

inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }

inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }

inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }

inline constexpr bool IsLittleEndian() { return false; }

#endif /* ENDIAN */

// Functions to do unaligned loads and stores in little-endian order.
inline uint16_t Load16(const void *p) {
  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}

inline void Store16(void *p, uint16_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}

inline uint32_t Load32(const void *p) {
  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}

inline void Store32(void *p, uint32_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}

inline uint64_t Load64(const void *p) {
  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}

inline void Store64(void *p, uint64_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}

}  // namespace little_endian
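
// Example (a usage sketch; buffer contents shown are illustrative): a value
// stored in little-endian order loads back identically regardless of the
// host's native byte order, and the pointer need not be aligned.
//
//   char buf[4];
//   absl::little_endian::Store32(buf, 0x12345678);
//   // buf now holds the bytes {0x78, 0x56, 0x34, 0x12}.
//   uint32_t v = absl::little_endian::Load32(buf);  // v == 0x12345678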

// Utilities to convert numbers between the current host's native byte
// order and big-endian byte order (same as network byte order)
//
// Load/Store methods are alignment safe
namespace big_endian {
#ifdef ABSL_IS_LITTLE_ENDIAN

inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }

inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }

inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }

inline constexpr bool IsLittleEndian() { return true; }

#elif defined ABSL_IS_BIG_ENDIAN

inline uint16_t FromHost16(uint16_t x) { return x; }
inline uint16_t ToHost16(uint16_t x) { return x; }

inline uint32_t FromHost32(uint32_t x) { return x; }
inline uint32_t ToHost32(uint32_t x) { return x; }

inline uint64_t FromHost64(uint64_t x) { return x; }
inline uint64_t ToHost64(uint64_t x) { return x; }

inline constexpr bool IsLittleEndian() { return false; }

#endif /* ENDIAN */

// Functions to do unaligned loads and stores in big-endian order.
inline uint16_t Load16(const void *p) {
  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}

inline void Store16(void *p, uint16_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}

inline uint32_t Load32(const void *p) {
  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}

inline void Store32(void *p, uint32_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}

inline uint64_t Load64(const void *p) {
  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}

inline void Store64(void *p, uint64_t v) {
  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}

}  // namespace big_endian
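
// Example (a usage sketch with hypothetical packet bytes): reading a 16-bit
// length field that arrives in network (big-endian) byte order from a
// possibly unaligned buffer.
//
//   const unsigned char packet[] = {0x01, 0x02};
//   uint16_t len = absl::big_endian::Load16(packet);  // len == 0x0102 == 258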

ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_BASE_INTERNAL_ENDIAN_H_