/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*-****************************************
*  Dependencies
******************************************/
#include <stddef.h>    /* size_t, ptrdiff_t */
#include <string.h>    /* memcpy */


/*-****************************************
*  Compiler specifics
******************************************/
#if defined(_MSC_VER)   /* Visual Studio */
# include <stdlib.h>    /* _byteswap_ulong */
# include <intrin.h>    /* _byteswap_* */
#endif
#if defined(__GNUC__)
# define MEM_STATIC static __inline __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define MEM_STATIC static inline
#elif defined(_MSC_VER)
# define MEM_STATIC static __inline
#else
# define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif

#ifndef __has_builtin
# define __has_builtin(x) 0  /* compat. with non-clang compilers */
#endif

/* code only tested on 32- and 64-bit systems */
#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
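/* How the assert works : if the condition is false, 1/(int)(!!(c)) becomes a
 * division by zero inside a constant expression, which the compiler must
 * reject ; builds for unsupported word sizes therefore fail at compile time
 * rather than misbehaving at run time. */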

/* detects whether we are being compiled under msan */
#if defined (__has_feature)
# if __has_feature(memory_sanitizer)
#  define MEMORY_SANITIZER 1
# endif
#endif

#if defined (MEMORY_SANITIZER)
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */

#include <stdint.h> /* intptr_t */

/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);

/* Make memory region fully uninitialized (without changing its contents).
   This is a legacy interface that does not update origin information. Use
   __msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);

/* Returns the offset of the first (at least partially) poisoned byte in the
   memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
#endif
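
/* Usage sketch (illustrative only, assuming an msan-instrumented build and a
 * caller-owned buffer `buf` of `size` bytes) :
 *
 *     assert(__msan_test_shadow(buf, size) == -1);  // whole range initialized
 *     __msan_poison(buf, size);                     // mark it uninitialized again
 *     __msan_unpoison(buf, size);                   // and back to initialized
 */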

/* detects whether we are being compiled under asan */
#if defined (__has_feature)
# if __has_feature(address_sanitizer)
#  define ADDRESS_SANITIZER 1
# endif
#elif defined(__SANITIZE_ADDRESS__)
# define ADDRESS_SANITIZER 1
#endif

#if defined (ADDRESS_SANITIZER)
/* Not all platforms that support asan provide sanitizers/asan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
 *
 * This memory must be previously allocated by your program. Instrumented
 * code is forbidden from accessing addresses in this region until it is
 * unpoisoned. This function is not guaranteed to poison the entire region -
 * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
 * alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can poison or
 * unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_poison_memory_region(void const volatile *addr, size_t size);

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
 *
 * This memory must be previously allocated by your program. Accessing
 * addresses in this region is allowed until this region is poisoned again.
 * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
 * to ASan alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can
 * poison or unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif
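
/* Usage sketch (illustrative only, with a hypothetical arena allocator) :
 * poison the unused tail so instrumented code traps on stray accesses, then
 * unpoison each chunk just before handing it out :
 *
 *     __asan_poison_memory_region(arena + used, capacity - used);
 *     __asan_unpoison_memory_region(arena + used, chunkSize);
 */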


/*-**************************************************************
*  Basic Types
*****************************************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
typedef  uint8_t BYTE;
typedef uint16_t U16;
typedef  int16_t S16;
typedef uint32_t U32;
typedef  int32_t S32;
typedef uint64_t U64;
typedef  int64_t S64;
#else
# include <limits.h>
#if CHAR_BIT != 8
# error "this implementation requires char to be exactly 8 bits"
#endif
typedef unsigned char  BYTE;
#if USHRT_MAX != 65535
# error "this implementation requires short to be exactly 16 bits"
#endif
typedef unsigned short U16;
typedef   signed short S16;
#if UINT_MAX != 4294967295
# error "this implementation requires int to be exactly 32 bits"
#endif
typedef unsigned int   U32;
typedef   signed int   S32;
/* note : C90 defines no limits for the long long type ;
 * C99 does, but in that case <stdint.h> is preferred anyway */
typedef unsigned long long U64;
typedef   signed long long S64;
#endif


/*-**************************************************************
*  Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is done through `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on a compiler extension, but it violates the C standard.
 *            It can generate buggy code on targets that depend on alignment.
 *            In some circumstances, it's the only known way to get the most performance (e.g. GCC + ARMv6)
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#   define MEM_FORCE_MEMORY_ACCESS 2
# elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
#   define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif
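
/* For example, a build that wants the packed-struct method regardless of the
 * defaults above could select it on the command line (hypothetical invocation) :
 *
 *     cc -DMEM_FORCE_MEMORY_ACCESS=1 -c file.c
 */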

MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }

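/* The check below stores the 32-bit value 1 and inspects its first byte :
 * on a little-endian target the least significant byte is stored first, so
 * c[0]==1 ; on a big-endian target c[0]==0. Optimizing compilers typically
 * fold the whole test to a constant. */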
MEM_STATIC unsigned MEM_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : detrimental to performance */
    return one.c[0];
}

#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)

/* violates the C standard by lying about structure alignment.
   Only use if there is no other way to achieve best performance on the target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

/* `__packed` instructions are safer, but compiler-specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
    __pragma( pack(push, 1) )
    typedef struct { U16 v; } unalign16;
    typedef struct { U32 v; } unalign32;
    typedef struct { U64 v; } unalign64;
    typedef struct { size_t v; } unalignArch;
    __pragma( pack(pop) )
#else
    typedef struct { U16 v; } __attribute__((packed)) unalign16;
    typedef struct { U32 v; } __attribute__((packed)) unalign32;
    typedef struct { U64 v; } __attribute__((packed)) unalign64;
    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
#endif

MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }

#else

/* default method, safe and standard.
   can sometimes prove slower */

MEM_STATIC U16 MEM_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U32 MEM_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U64 MEM_read64(const void* memPtr)
{
    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC size_t MEM_readST(const void* memPtr)
{
    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif /* MEM_FORCE_MEMORY_ACCESS */

MEM_STATIC U32 MEM_swap32(U32 in)
{
#if (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap32))
    return __builtin_bswap32(in);
#else
    return  ((in << 24) & 0xff000000 ) |
            ((in <<  8) & 0x00ff0000 ) |
            ((in >>  8) & 0x0000ff00 ) |
            ((in >> 24) & 0x000000ff );
#endif
}
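
/* e.g. MEM_swap32(0x11223344) == 0x44332211 : each shift/mask pair above
 * moves one byte to its mirrored position. */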

MEM_STATIC U64 MEM_swap64(U64 in)
{
#if (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
    return __builtin_bswap64(in);
#else
    return  ((in << 56) & 0xff00000000000000ULL) |
            ((in << 40) & 0x00ff000000000000ULL) |
            ((in << 24) & 0x0000ff0000000000ULL) |
            ((in <<  8) & 0x000000ff00000000ULL) |
            ((in >>  8) & 0x00000000ff000000ULL) |
            ((in >> 24) & 0x0000000000ff0000ULL) |
            ((in >> 40) & 0x000000000000ff00ULL) |
            ((in >> 56) & 0x00000000000000ffULL);
#endif
}

MEM_STATIC size_t MEM_swapST(size_t in)
{
    if (MEM_32bits())
        return (size_t)MEM_swap32((U32)in);
    else
        return (size_t)MEM_swap64((U64)in);
}

/*=== Little endian r/w ===*/

MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read16(memPtr);
    else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)(p[0] + (p[1]<<8));
    }
}

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
    if (MEM_isLittleEndian()) {
        MEM_write16(memPtr, val);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE)val;
        p[1] = (BYTE)(val>>8);
    }
}

MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}

MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
{
    MEM_writeLE16(memPtr, (U16)val);
    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
}
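
/* Round-trip sketch for a 3-byte little-endian field (hypothetical buffer) :
 *
 *     BYTE header[3];
 *     MEM_writeLE24(header, 0x123456);
 *     assert(MEM_readLE24(header) == 0x123456);
 */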

MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read32(memPtr);
    else
        return MEM_swap32(MEM_read32(memPtr));
}

MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, val32);
    else
        MEM_write32(memPtr, MEM_swap32(val32));
}

MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read64(memPtr);
    else
        return MEM_swap64(MEM_read64(memPtr));
}

MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, val64);
    else
        MEM_write64(memPtr, MEM_swap64(val64));
}

MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readLE32(memPtr);
    else
        return (size_t)MEM_readLE64(memPtr);
}

MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeLE32(memPtr, (U32)val);
    else
        MEM_writeLE64(memPtr, (U64)val);
}
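
/* Round-trip sketch for a native size_t (hypothetical buffer ; 8 bytes is
 * enough on both 32- and 64-bit targets) :
 *
 *     BYTE buf[8];
 *     MEM_writeLEST(buf, (size_t)42);
 *     assert(MEM_readLEST(buf) == (size_t)42);
 */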

/*=== Big endian r/w ===*/

MEM_STATIC U32 MEM_readBE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap32(MEM_read32(memPtr));
    else
        return MEM_read32(memPtr);
}

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, MEM_swap32(val32));
    else
        MEM_write32(memPtr, val32);
}

MEM_STATIC U64 MEM_readBE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap64(MEM_read64(memPtr));
    else
        return MEM_read64(memPtr);
}

MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, MEM_swap64(val64));
    else
        MEM_write64(memPtr, val64);
}

MEM_STATIC size_t MEM_readBEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readBE32(memPtr);
    else
        return (size_t)MEM_readBE64(memPtr);
}

MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeBE32(memPtr, (U32)val);
    else
        MEM_writeBE64(memPtr, (U64)val);
}


#if defined (__cplusplus)
}
#endif

#endif /* MEM_H_MODULE */