/*
 * xxHash - Fast Hash algorithm
 * Copyright (C) 2012-2016, Yann Collet
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 * - xxHash homepage: http://www.xxhash.com
 * - xxHash source repository : https://github.com/Cyan4973/xxHash
 */


/* *************************************
* Tuning parameters
***************************************/
/*!XXH_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on the compiler, but it violates the C standard.
 *            It can generate buggy code on targets which do not support unaligned memory accesses.
 *            But in some circumstances, it's the only known way to get the best performance (e.g., GCC + ARMv6).
 * See http://stackoverflow.com/a/32095106/646947 for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#  define XXH_FORCE_MEMORY_ACCESS 2
# elif defined(__INTEL_COMPILER) || \
  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
#  define XXH_FORCE_MEMORY_ACCESS 1
# endif
#endif
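
/*
 * For example, a build could select method 2 from the compiler command line
 * instead of editing this file (illustrative invocation only; the compiler
 * name, flags, and file name are assumptions to be adapted to your build):
 *
 *     cc -DXXH_FORCE_MEMORY_ACCESS=2 -O2 -c xxhash.c
 */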

/*!XXH_ACCEPT_NULL_INPUT_POINTER :
 * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
 * When this option is enabled, xxHash output for null input pointers will be the same as for a zero-length input.
 * By default, this option is disabled. To enable it, uncomment the define below :
 */
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
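
/*
 * Sketch of the intended behavior when the option above is enabled: hashing a
 * NULL pointer, whatever the announced length, yields the same result as
 * hashing a zero-length input, e.g. XXH32(NULL, len, seed) == XXH32("", 0, seed).
 * With the option disabled, passing NULL with a non-zero length dereferences
 * the bad pointer.
 */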

/*!XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance for your application, you may set the #define below to 1,
 * to improve speed for big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
# define XXH_FORCE_NATIVE_FORMAT 0
#endif

/*!XXH_FORCE_ALIGN_CHECK :
 * This is a minor performance trick, only useful with lots of very small keys.
 * It means : check for aligned/unaligned input.
 * The check costs one initial branch per hash; set to 0 when the input data
 * is guaranteed to be aligned.
 */
#ifndef XXH_FORCE_ALIGN_CHECK   /* can be defined externally */
# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#  define XXH_FORCE_ALIGN_CHECK 0
# else
#  define XXH_FORCE_ALIGN_CHECK 1
# endif
#endif

#if defined(_KERNEL)
#include <sys/types.h>
#include <sys/systm.h>
#else

/* *************************************
* Includes & Memory related functions
***************************************/
/* Modify the local functions below should you wish to use some other memory routines */
/* for malloc(), free() */
#include <stdlib.h>
static void* XXH_malloc(size_t s) { return malloc(s); }
static void  XXH_free  (void* p)  { free(p); }
/* for memcpy() */
#include <string.h>
#endif

static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }

#define XXH_STATIC_LINKING_ONLY
#include "xxhash.h"


/* *************************************
* Compiler Specific Options
***************************************/
#ifdef _MSC_VER    /* Visual Studio */
# pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
# define FORCE_INLINE static __forceinline
#else
# if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  ifdef __GNUC__
#   define FORCE_INLINE static inline __attribute__((always_inline))
#  else
#   define FORCE_INLINE static inline
#  endif
# else
#  define FORCE_INLINE static
# endif /* __STDC_VERSION__ */
#endif


/* *************************************
* Basic Types
***************************************/
#ifndef MEM_MODULE
# define MEM_MODULE
# if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  include <sys/stdint.h>
    typedef uint8_t  BYTE;
    typedef uint16_t U16;
    typedef uint32_t U32;
    typedef  int32_t S32;
    typedef uint64_t U64;
# else
    typedef unsigned char      BYTE;
    typedef unsigned short     U16;
    typedef unsigned int       U32;
    typedef   signed int       S32;
    typedef unsigned long long U64;
# endif
#endif


#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* __packed instructions are safer, but compiler-specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;

static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }

#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */

static U32 XXH_read32(const void* memPtr)
{
    U32 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

static U64 XXH_read64(const void* memPtr)
{
    U64 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif /* XXH_FORCE_MEMORY_ACCESS */


/* ****************************************
* Compiler-specific Functions and Macros
******************************************/
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

/* Note : although _rotl exists for MinGW (GCC under Windows), performance seems poor */
#if defined(_MSC_VER)
# define XXH_rotl32(x,r) _rotl(x,r)
# define XXH_rotl64(x,r) _rotl64(x,r)
#else
# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#endif

#if defined(_MSC_VER)   /* Visual Studio */
# define XXH_swap32 _byteswap_ulong
# define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
# define XXH_swap32 __builtin_bswap32
# define XXH_swap64 __builtin_bswap64
#else
static U32 XXH_swap32 (U32 x)
{
    return ((x << 24) & 0xff000000 ) |
           ((x << 8)  & 0x00ff0000 ) |
           ((x >> 8)  & 0x0000ff00 ) |
           ((x >> 24) & 0x000000ff );
}
static U64 XXH_swap64 (U64 x)
{
    return ((x << 56) & 0xff00000000000000ULL) |
           ((x << 40) & 0x00ff000000000000ULL) |
           ((x << 24) & 0x0000ff0000000000ULL) |
           ((x << 8)  & 0x000000ff00000000ULL) |
           ((x >> 8)  & 0x00000000ff000000ULL) |
           ((x >> 24) & 0x0000000000ff0000ULL) |
           ((x >> 40) & 0x000000000000ff00ULL) |
           ((x >> 56) & 0x00000000000000ffULL);
}
#endif


/* *************************************
* Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;

/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
static const int g_one = 1;
# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one))
#endif
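
/*
 * The fallback above detects endianness at run time: g_one is the integer 1,
 * and reading its first byte through a char pointer yields 1 on little-endian
 * machines and 0 on big-endian ones. Defining XXH_CPU_LITTLE_ENDIAN externally
 * (e.g. -DXXH_CPU_LITTLE_ENDIAN=1 on a known little-endian target) lets the
 * compiler treat it as a constant and fold the endianness branches away.
 */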


/* ***************************
* Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
    else
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}

FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}

static U32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}

FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
    else
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}

FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}

static U64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}


/* *************************************
* Macros
***************************************/
#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */


/* *************************************
* Constants
***************************************/
static const U32 PRIME32_1 = 2654435761U;
static const U32 PRIME32_2 = 2246822519U;
static const U32 PRIME32_3 = 3266489917U;
static const U32 PRIME32_4 = 668265263U;
static const U32 PRIME32_5 = 374761393U;

static const U64 PRIME64_1 = 11400714785074694791ULL;
static const U64 PRIME64_2 = 14029467366897019727ULL;
static const U64 PRIME64_3 = 1609587929392839161ULL;
static const U64 PRIME64_4 = 9650029242287828579ULL;
static const U64 PRIME64_5 = 2870177450012600261ULL;

XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* ***************************
* Simple Hash Functions
*****************************/

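/*
 * One 32-bit mixing round: folds a 4-byte lane into an accumulator with a
 * multiply, a rotate-left by 13, and another multiply by the xxHash primes.
 * The main loop below keeps four such accumulators (v1..v4) running in parallel.
 */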
static U32 XXH32_round(U32 seed, U32 input)
{
    seed += input * PRIME32_2;
    seed  = XXH_rotl32(seed, 13);
    seed *= PRIME32_1;
    return seed;
}

FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
        } while (p<=limit);

        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32 = seed + PRIME32_5;
    }

    h32 += (U32) len;

    while (p+4<=bEnd) {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_CREATESTATE_STATIC(state);
    XXH32_reset(state, seed);
    XXH32_update(state, input, len);
    return XXH32_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-byte aligned, leverage the speed benefit */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    } }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}

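/*
 * Illustrative one-shot usage (a sketch, not part of the compiled code; the
 * buffer contents and the seed value are arbitrary examples):
 *
 *     const char data[] = "xxhash example";
 *     unsigned int const seed = 0;
 *     unsigned int const h = XXH32(data, sizeof(data) - 1, seed);
 *
 * The same bytes hashed with the same seed produce the same value on
 * little-endian and big-endian machines, unless XXH_FORCE_NATIVE_FORMAT is
 * set to 1.
 */
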
/*
 * One 64-bit mixing round (multiply, rotate-left by 31, multiply), applied to
 * each 8-byte lane of the four parallel accumulators in the main loop.
 */
static U64 XXH64_round(U64 acc, U64 input)
{
    acc += input * PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= PRIME64_1;
    return acc;
}

/* Folds one accumulator into the converging 64-bit hash during finalization. */
static U64 XXH64_mergeRound(U64 acc, U64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * PRIME64_1 + PRIME64_4;
    return acc;
}

FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
        } while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64 = seed + PRIME64_5;
    }

    h64 += (U64) len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_CREATESTATE_STATIC(state);
    XXH64_reset(state, seed);
    XXH64_update(state, input, len);
    return XXH64_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {   /* Input is aligned, let's leverage the speed advantage */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    } }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}


/* **************************************************
* Advanced Hash Functions
****************************************************/

#if !defined(_KERNEL)

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

#endif
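
/*
 * Illustrative streaming usage (a sketch, not part of the compiled code;
 * buf1/buf2, their sizes, and the seed are placeholder values). The XXH32_*
 * functions follow the same pattern:
 *
 *     XXH64_state_t* const state = XXH64_createState();
 *     XXH64_reset(state, 0);                     // seed = 0
 *     XXH64_update(state, buf1, buf1_size);      // feed the data in any
 *     XXH64_update(state, buf2, buf2_size);      // number of pieces, in order
 *     unsigned long long const h = XXH64_digest(state);
 *     XXH64_freeState(state);
 *
 * The digest equals the one-shot XXH64() of the concatenated input.
 */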

/*** Hash feed ***/

XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state));
    state.seed = seed;
    state.v1 = seed + PRIME32_1 + PRIME32_2;
    state.v2 = seed + PRIME32_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME32_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}


XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state));
    state.seed = seed;
    state.v1 = seed + PRIME64_1 + PRIME64_2;
    state.v2 = seed + PRIME64_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME64_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}


FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 16) {   /* fill in tmp buffer */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* some data left from previous update */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
        {   const U32* p32 = state->mem32;
            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
        }
        p += 16-state->memsize;
        state->memsize = 0;
    }

    if (p <= bEnd-16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = state->v1;
        U32 v2 = state->v2;
        U32 v3 = state->v3;
        U32 v4 = state->v4;

        do {
            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem32, p, bEnd-p);
        state->memsize = (int)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}



FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem32;
    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
    U32 h32;

    if (state->total_len >= 16) {
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    } else {
        h32 = state->seed + PRIME32_5;
    }

    h32 += (U32) state->total_len;

    while (p+4<=bEnd) {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH32_digest_endian(state_in, XXH_bigEndian);
}



/* **** XXH64 **** */

FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32) {   /* fill in tmp buffer */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* tmp buffer is full */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
        p += 32-state->memsize;
        state->memsize = 0;
    }

    if (p+32 <= bEnd) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        do {
            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem64, p, bEnd-p);
        state->memsize = (int)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}



FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem64;
    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
    U64 h64;

    if (state->total_len >= 32) {
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64 = state->seed + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH64_digest_endian(state_in, XXH_bigEndian);
}


/* **************************
* Canonical representation
****************************/

/*! Default XXH result types are basic unsigned 32- and 64-bit integers.
 * The canonical representation follows the human-readable write convention, aka big-endian (large digits first).
 * These functions allow transformation of a hash result into and from its canonical format.
 * This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
 */
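
/*
 * Illustrative round trip (a sketch, not part of the compiled code; `h` stands
 * for any previously computed 64-bit hash value):
 *
 *     XXH64_canonical_t canon;
 *     XXH64_canonicalFromHash(&canon, h);        // big-endian byte layout,
 *                                                // safe to store or transmit
 *     XXH64_hash_t const h2 = XXH64_hashFromCanonical(&canon);
 *
 * h2 equals h on any platform, regardless of native endianness.
 */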

XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}

XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}