/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*-****************************************
*  Dependencies
******************************************/
#include <stddef.h>     /* size_t, ptrdiff_t */
#include <string.h>     /* memcpy */


/*-****************************************
*  Compiler specifics
******************************************/
#if defined(_MSC_VER)   /* Visual Studio */
#   include <stdlib.h>  /* _byteswap_ulong */
#   include <intrin.h>  /* _byteswap_* */
#endif
#if defined(__GNUC__)
#  define MEM_STATIC static __inline __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define MEM_STATIC static inline
#elif defined(_MSC_VER)
#  define MEM_STATIC static __inline
#else
#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif

#ifndef __has_builtin
#  define __has_builtin(x) 0  /* compat. with non-clang compilers */
#endif

/* code only tested on 32- and 64-bit systems */
#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
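/* Explanatory note (not from upstream): MEM_STATIC_ASSERT expands to an enum
 * whose initializer divides by zero when the condition is false, so a failed
 * assertion is rejected at compile time. It must appear inside a function
 * body, as in MEM_check() above. */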

/* detects whether we are being compiled under msan */
#if defined (__has_feature)
#  if __has_feature(memory_sanitizer)
#    define MEMORY_SANITIZER 1
#  endif
#endif

#if defined (MEMORY_SANITIZER)
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */

#include <stdint.h> /* intptr_t */

/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);

/* Make memory region fully uninitialized (without changing its contents).
   This is a legacy interface that does not update origin information. Use
   __msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);

/* Returns the offset of the first (at least partially) poisoned byte in the
   memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
#endif
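
/* Usage sketch (illustrative only, not part of this header's API): under
 * msan, these interfaces let instrumented code manage shadow state directly:
 *
 *     char buf[64];
 *     __msan_poison(buf, sizeof(buf));     // mark buf as uninitialized
 *     // __msan_test_shadow(buf, sizeof(buf)) now returns 0,
 *     // the offset of the first poisoned byte
 *     __msan_unpoison(buf, sizeof(buf));   // mark buf as initialized again
 */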

/* detects whether we are being compiled under asan */
#if defined (ZFS_ASAN_ENABLED)
#  define ADDRESS_SANITIZER 1
#  define ZSTD_ASAN_DONT_POISON_WORKSPACE
#endif

#if defined (ADDRESS_SANITIZER)
/* Not all platforms that support asan provide sanitizers/asan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
 *
 * This memory must be previously allocated by your program. Instrumented
 * code is forbidden from accessing addresses in this region until it is
 * unpoisoned. This function is not guaranteed to poison the entire region -
 * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
 * alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can poison or
 * unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_poison_memory_region(void const volatile *addr, size_t size);

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
 *
 * This memory must be previously allocated by your program. Accessing
 * addresses in this region is allowed until this region is poisoned again.
 * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
 * to ASan alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can
 * poison or unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif
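
/* Usage sketch (illustrative only, not part of this header's API):
 * instrumented code can fence off part of an allocation so stray accesses
 * are reported:
 *
 *     char* ws = (char*)malloc(1024);
 *     __asan_poison_memory_region(ws + 512, 512);    // trap accesses past 512
 *     // ... use only ws[0..511] here ...
 *     __asan_unpoison_memory_region(ws + 512, 512);  // re-allow before reuse
 */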


/*-**************************************************************
*  Basic Types
*****************************************************************/
#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
  typedef   uint8_t BYTE;
  typedef  uint16_t U16;
  typedef   int16_t S16;
  typedef  uint32_t U32;
  typedef   int32_t S32;
  typedef  uint64_t U64;
  typedef   int64_t S64;
#else
# include <limits.h>
#if CHAR_BIT != 8
#  error "this implementation requires char to be exactly 8 bits wide"
#endif
  typedef unsigned char      BYTE;
#if USHRT_MAX != 65535
#  error "this implementation requires short to be exactly 16 bits wide"
#endif
  typedef unsigned short      U16;
  typedef   signed short      S16;
#if UINT_MAX != 4294967295
#  error "this implementation requires int to be exactly 32 bits wide"
#endif
  typedef unsigned int        U32;
  typedef   signed int        S32;
/* note : C90 defines no limits for the long long type;
 * C99 does, but in that case <stdint.h> is preferred anyway */
  typedef unsigned long long  U64;
  typedef   signed long long  S64;
#endif


/*-**************************************************************
*  Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is performed through `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below selects a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. Relies on a compiler extension (hence not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets where code generation depends on alignment.
 *            In some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define MEM_FORCE_MEMORY_ACCESS 2
#  elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
#    define MEM_FORCE_MEMORY_ACCESS 1
#  endif
#endif
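
/* For example (a sketch, not an upstream recommendation), the access method
 * can be forced from the build command line:
 *     cc -DMEM_FORCE_MEMORY_ACCESS=1 ...
 * Any value other than 1 or 2 falls through to the safe memcpy() path below. */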

MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }

MEM_STATIC unsigned MEM_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : detrimental to performance */
    return one.c[0];
}
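/* (Explanatory note, not from upstream: the union stores the 32-bit value 1
 * and inspects its first byte. On a little-endian target the least
 * significant byte is stored first, so c[0]==1; on big-endian, c[0]==0.
 * Compilers typically fold this test to a constant.) */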

#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)

/* violates the C standard by lying about structure alignment.
Only use if there is no other choice to achieve best performance on the target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

/* packed structures are safer, but compiler-specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
    __pragma( pack(push, 1) )
    typedef struct { U16 v; } unalign16;
    typedef struct { U32 v; } unalign32;
    typedef struct { U64 v; } unalign64;
    typedef struct { size_t v; } unalignArch;
    __pragma( pack(pop) )
#else
    typedef struct { U16 v; } __attribute__((packed)) unalign16;
    typedef struct { U32 v; } __attribute__((packed)) unalign32;
    typedef struct { U64 v; } __attribute__((packed)) unalign64;
    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
#endif

MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }

#else

/* default method, safe and standard.
   can sometimes prove slower */

MEM_STATIC U16 MEM_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U32 MEM_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U64 MEM_read64(const void* memPtr)
{
    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC size_t MEM_readST(const void* memPtr)
{
    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif /* MEM_FORCE_MEMORY_ACCESS */

MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_ulong(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap32))
    return __builtin_bswap32(in);
#else
    return  ((in << 24) & 0xff000000 ) |
            ((in <<  8) & 0x00ff0000 ) |
            ((in >>  8) & 0x0000ff00 ) |
            ((in >> 24) & 0x000000ff );
#endif
}

MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_uint64(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
    return __builtin_bswap64(in);
#else
    return  ((in << 56) & 0xff00000000000000ULL) |
            ((in << 40) & 0x00ff000000000000ULL) |
            ((in << 24) & 0x0000ff0000000000ULL) |
            ((in << 8)  & 0x000000ff00000000ULL) |
            ((in >> 8)  & 0x00000000ff000000ULL) |
            ((in >> 24) & 0x0000000000ff0000ULL) |
            ((in >> 40) & 0x000000000000ff00ULL) |
            ((in >> 56) & 0x00000000000000ffULL);
#endif
}

MEM_STATIC size_t MEM_swapST(size_t in)
{
    if (MEM_32bits())
        return (size_t)MEM_swap32((U32)in);
    else
        return (size_t)MEM_swap64((U64)in);
}

/*=== Little endian r/w ===*/

MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read16(memPtr);
    else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)(p[0] + (p[1]<<8));
    }
}

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
    if (MEM_isLittleEndian()) {
        MEM_write16(memPtr, val);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE)val;
        p[1] = (BYTE)(val>>8);
    }
}

MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}

MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
{
    MEM_writeLE16(memPtr, (U16)val);
    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
}
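
/* Example (a sketch): the LE24 pair round-trips a 3-byte little-endian field:
 *     BYTE buf[3];
 *     MEM_writeLE24(buf, 0xABCDEF);   // buf = { 0xEF, 0xCD, 0xAB }
 *     U32 v = MEM_readLE24(buf);      // v == 0xABCDEF
 */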

MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read32(memPtr);
    else
        return MEM_swap32(MEM_read32(memPtr));
}

MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, val32);
    else
        MEM_write32(memPtr, MEM_swap32(val32));
}

MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read64(memPtr);
    else
        return MEM_swap64(MEM_read64(memPtr));
}

MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, val64);
    else
        MEM_write64(memPtr, MEM_swap64(val64));
}

MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readLE32(memPtr);
    else
        return (size_t)MEM_readLE64(memPtr);
}

MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeLE32(memPtr, (U32)val);
    else
        MEM_writeLE64(memPtr, (U64)val);
}

/*=== Big endian r/w ===*/

MEM_STATIC U32 MEM_readBE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap32(MEM_read32(memPtr));
    else
        return MEM_read32(memPtr);
}

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, MEM_swap32(val32));
    else
        MEM_write32(memPtr, val32);
}

MEM_STATIC U64 MEM_readBE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap64(MEM_read64(memPtr));
    else
        return MEM_read64(memPtr);
}

MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, MEM_swap64(val64));
    else
        MEM_write64(memPtr, val64);
}

MEM_STATIC size_t MEM_readBEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readBE32(memPtr);
    else
        return (size_t)MEM_readBE64(memPtr);
}

MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeBE32(memPtr, (U32)val);
    else
        MEM_writeBE64(memPtr, (U64)val);
}


#if defined (__cplusplus)
}
#endif

#endif /* MEM_H_MODULE */