/*
 * xxHash - Fast Hash algorithm
 * Copyright (c) Yann Collet, Facebook, Inc.
 *
 * You can contact the author at :
 * - xxHash homepage: http://www.xxhash.com
 * - xxHash source repository : https://github.com/Cyan4973/xxHash
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* *************************************
*  Tuning parameters
***************************************/
/*!XXH_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is performed through `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below selects a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on the compiler, but it violates the C standard.
 *            It can generate buggy code on targets which do not support unaligned memory accesses.
 *            However, in some circumstances, it's the only known way to get the best performance (e.g., GCC + ARMv6).
 * See http://stackoverflow.com/a/32095106/646947 for details.
 * Prefer these methods in priority order (0 > 1 > 2).
 */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \
  defined(__ICCARM__)
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif
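
/* Illustrative note (an assumption about typical build setups, not part of the
 * original tuning logic): the access method is usually selected on the compiler
 * command line rather than by editing this file, e.g.
 *
 *     cc -O3 -DXXH_FORCE_MEMORY_ACCESS=2 -c xxhash.c
 *
 * Leaving the macro undefined keeps the safe memcpy()-based method 0, unless
 * the target detection above picks method 1.
 */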

/*!XXH_ACCEPT_NULL_INPUT_POINTER :
 * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
 * When this option is enabled, xxHash's output for a null input pointer is the same as for a zero-length input.
 * By default, this option is disabled. To enable it, uncomment the define below :
 */
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
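
/* Illustrative sketch of the difference (assuming the one-shot XXH32() API
 * declared in xxhash.h; the seed value is arbitrary):
 *
 *     // default build : XXH32(NULL, 0, seed) is undefined behavior (bad pointer)
 *     // with XXH_ACCEPT_NULL_INPUT_POINTER : XXH32(NULL, 0, seed) == XXH32("", 0, seed)
 */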

/*!XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance for your application, you may set the #define below to 1,
 * to improve speed on big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
#  define XXH_FORCE_NATIVE_FORMAT 0
#endif

/*!XXH_FORCE_ALIGN_CHECK :
 * This is a minor performance trick, only useful with lots of very small keys.
 * It adds a check for aligned/unaligned input.
 * The check costs one initial branch per hash; set it to 0 when the input data
 * is guaranteed to be aligned.
 */
#ifndef XXH_FORCE_ALIGN_CHECK   /* can be defined externally */
#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif


/* *************************************
*  Includes & Memory related functions
***************************************/
/* Modify the local functions below should you wish to use some other memory routines */
/* for ZSTD_malloc(), ZSTD_free() */
#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h"  /* size_t, ZSTD_malloc, ZSTD_free, ZSTD_memcpy */
static void* XXH_malloc(size_t s) { return ZSTD_malloc(s); }
static void  XXH_free  (void* p)  { ZSTD_free(p); }
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_memcpy(dest,src,size); }

#ifndef XXH_STATIC_LINKING_ONLY
#  define XXH_STATIC_LINKING_ONLY
#endif
#include "xxhash.h"


/* *************************************
*  Compiler Specific Options
***************************************/
#include "compiler.h"


/* *************************************
*  Basic Types
***************************************/
#include "mem.h"  /* BYTE, U32, U64, size_t */

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* __packed accesses are safer, but compiler-specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;

static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }

#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */

static U32 XXH_read32(const void* memPtr)
{
    U32 val;
    ZSTD_memcpy(&val, memPtr, sizeof(val));
    return val;
}

static U64 XXH_read64(const void* memPtr)
{
    U64 val;
    ZSTD_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_MEMORY_ACCESS */


/* ****************************************
*  Compiler-specific Functions and Macros
******************************************/
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

/* Note : although _rotl exists for MinGW (GCC under Windows), performance seems poor */
#if defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#if defined(__ICCARM__)
#  include <intrinsics.h>
#  define XXH_rotl32(x,r) __ROR(x,(32 - r))
#else
#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#endif
#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#endif
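
/* Worked example of the rotation macros: XXH_rotl32(0x000000FFu, 8) == 0x0000FF00u
 * (the byte moves up, nothing wraps), while XXH_rotl32(0xFF000000u, 8) == 0x000000FFu
 * (the top byte wraps back around to the bottom). */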

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#  define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#  define XXH_swap64 __builtin_bswap64
#else
static U32 XXH_swap32 (U32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
static U64 XXH_swap64 (U64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x <<  8) & 0x000000ff00000000ULL) |
            ((x >>  8) & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif
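
/* Worked example of the byte-swap helpers: XXH_swap32(0x01020304) == 0x04030201 and
 * XXH_swap64(0x0102030405060708ULL) == 0x0807060504030201ULL; reversing the byte
 * order converts between little-endian and big-endian representations of a value. */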


/* *************************************
*  Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;

/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
static const int g_one = 1;
#  define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
#endif
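
/* Illustrative note: g_one holds the integer 1, so its first byte in memory is 1 on a
 * little-endian CPU and 0 on a big-endian CPU; reading that byte through a char pointer
 * therefore makes XXH_CPU_LITTLE_ENDIAN evaluate to 1 exactly on little-endian targets. */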


/* ***************************
*  Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
    else
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}

FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}

static U32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}

FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
    else
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}

FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}

static U64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
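
/* Worked example: given the byte sequence { 0x01, 0x02, 0x03, 0x04 }, XXH_readLE32()
 * returns 0x04030201 on any host (a plain read on little-endian hardware, a read plus
 * XXH_swap32() on big-endian hardware), which is how hash results stay endian-independent. */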


/* *************************************
*  Macros
***************************************/
#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */


/* *************************************
*  Constants
***************************************/
static const U32 PRIME32_1 = 2654435761U;
static const U32 PRIME32_2 = 2246822519U;
static const U32 PRIME32_3 = 3266489917U;
static const U32 PRIME32_4 =  668265263U;
static const U32 PRIME32_5 =  374761393U;

static const U64 PRIME64_1 = 11400714785074694791ULL;
static const U64 PRIME64_2 = 14029467366897019727ULL;
static const U64 PRIME64_3 =  1609587929392839161ULL;
static const U64 PRIME64_4 =  9650029242287828579ULL;
static const U64 PRIME64_5 =  2870177450012600261ULL;

XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* **************************
*  Utils
****************************/
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
{
    ZSTD_memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
{
    ZSTD_memcpy(dstState, srcState, sizeof(*dstState));
}


/* ***************************
*  Simple Hash Functions
*****************************/

static U32 XXH32_round(U32 seed, U32 input)
{
    seed += input * PRIME32_2;
    seed  = XXH_rotl32(seed, 13);
    seed *= PRIME32_1;
    return seed;
}

FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
        } while (p<=limit);

        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32 = seed + PRIME32_5;
    }

    h32 += (U32) len;

    while (p+4<=bEnd) {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1 ;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_CREATESTATE_STATIC(state);
    XXH32_reset(state, seed);
    XXH32_update(state, input, len);
    return XXH32_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
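
/* Illustrative one-shot usage sketch (from a caller that includes xxhash.h; the buffer
 * name and seed below are arbitrary):
 *
 *     const char data[] = "sample";
 *     unsigned int h = XXH32(data, sizeof(data)-1, 0);   // 0 is the seed
 */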


static U64 XXH64_round(U64 acc, U64 input)
{
    acc += input * PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= PRIME64_1;
    return acc;
}

static U64 XXH64_mergeRound(U64 acc, U64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * PRIME64_1 + PRIME64_4;
    return acc;
}

FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
        } while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64 = seed + PRIME64_5;
    }

    h64 += (U64) len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_CREATESTATE_STATIC(state);
    XXH64_reset(state, seed);
    XXH64_update(state, input, len);
    return XXH64_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
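
/* Illustrative one-shot usage sketch for the 64-bit variant (same calling pattern,
 * arbitrary identifiers):
 *
 *     const char data[] = "sample";
 *     unsigned long long h = XXH64(data, sizeof(data)-1, 0);   // 0 is the seed
 */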


/* **************************************************
*  Advanced Hash Functions
****************************************************/

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}


/*** Hash feed ***/

XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    ZSTD_memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME32_1 + PRIME32_2;
    state.v2 = seed + PRIME32_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME32_1;
    ZSTD_memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}


XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    ZSTD_memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME64_1 + PRIME64_2;
    state.v2 = seed + PRIME64_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME64_1;
    ZSTD_memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}


FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len_32 += (unsigned)len;
    state->large_len |= (len>=16) | (state->total_len_32>=16);

    if (state->memsize + len < 16) {   /* fill in tmp buffer */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
        state->memsize += (unsigned)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* some data left from previous update */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
        {   const U32* p32 = state->mem32;
            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
        }
        p += 16-state->memsize;
        state->memsize = 0;
    }

    if (p <= bEnd-16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = state->v1;
        U32 v2 = state->v2;
        U32 v3 = state->v3;
        U32 v4 = state->v4;

        do {
            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}



FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem32;
    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
    U32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    } else {
        h32 = state->v3 /* == seed */ + PRIME32_5;
    }

    h32 += state->total_len_32;

    while (p+4<=bEnd) {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH32_digest_endian(state_in, XXH_bigEndian);
}
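
/* Illustrative streaming usage sketch (produces the same value as one XXH32() call over
 * the concatenated input; chunk identifiers below are arbitrary):
 *
 *     XXH32_state_t* const st = XXH32_createState();
 *     XXH32_reset(st, 0);                        // 0 is the seed
 *     XXH32_update(st, chunk1, chunk1Size);
 *     XXH32_update(st, chunk2, chunk2Size);
 *     unsigned int const h = XXH32_digest(st);   // equals XXH32() over chunk1 + chunk2
 *     XXH32_freeState(st);
 */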



/* **** XXH64 **** */

FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32) {  /* fill in tmp buffer */
        if (input != NULL) {
            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        }
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* tmp buffer is full */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
        p += 32-state->memsize;
        state->memsize = 0;
    }

    if (p+32 <= bEnd) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        do {
            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}



FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem64;
    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
    U64 h64;

    if (state->total_len >= 32) {
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64 = state->v3 + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH64_digest_endian(state_in, XXH_bigEndian);
}


/* **************************
*  Canonical representation
****************************/

/*! Default XXH result types are basic unsigned 32 and 64 bit integers.
 *  The canonical representation follows the human-readable write convention, aka big-endian (large digits first).
 *  These functions allow transformation of a hash result into and from its canonical format.
 *  This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
 */

XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    ZSTD_memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    ZSTD_memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}

XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}
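
/* Illustrative round-trip sketch (the canonical form is a fixed big-endian byte layout,
 * safe to store on disk or send over a network; h64 below stands for any XXH64() result):
 *
 *     XXH64_canonical_t c;
 *     XXH64_canonicalFromHash(&c, h64);
 *     // ... write the sizeof(c) bytes of c to a file or buffer ...
 *     XXH64_hash_t const back = XXH64_hashFromCanonical(&c);   // back == h64
 */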