/*
 * xxHash - Fast Hash algorithm
 * Copyright (C) 2012-2016, Yann Collet
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 * - xxHash homepage: http://www.xxhash.com
 * - xxHash source repository : https://github.com/Cyan4973/xxHash
 */


/* *************************************
*  Tuning parameters
***************************************/
/*!XXH_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on the compiler, but it violates the C standard.
 *            It can generate buggy code on targets which do not support unaligned memory accesses.
 *            But in some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
 *            See http://stackoverflow.com/a/32095106/646947 for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
                          || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
                          || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define XXH_FORCE_MEMORY_ACCESS 2
#  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
    (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
                         || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
                         || defined(__ARM_ARCH_7S__) ))
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif
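/* Example (illustrative, not part of the original file) : as a compile-time switch,
 * this is typically set from the build command line rather than by editing this file, e.g.
 *     cc -O3 -DXXH_FORCE_MEMORY_ACCESS=2 -c xxhash.c
 * where 2 is only a sample value; 0 remains the safe, portable default. */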

/*!XXH_ACCEPT_NULL_INPUT_POINTER :
 * If the input pointer is NULL, xxHash's default behavior is to dereference it, triggering a segfault.
 * When this macro is enabled, xxHash actively checks the input for a NULL pointer.
 * If it is NULL, the result is the same as for a zero-length input.
 */
#ifndef XXH_ACCEPT_NULL_INPUT_POINTER   /* can be defined externally */
#  define XXH_ACCEPT_NULL_INPUT_POINTER 0
#endif

/*!XXH_FORCE_ALIGN_CHECK :
 * This is a minor performance trick, only useful with lots of very small keys.
 * It means : check for aligned/unaligned input.
 * The check costs one initial branch per hash;
 * set it to 0 when the input is guaranteed to be aligned,
 * or when alignment doesn't matter for performance.
 */
#ifndef XXH_FORCE_ALIGN_CHECK   /* can be defined externally */
#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif

/*!XXH_REROLL:
 * Whether to reroll XXH32_finalize and XXH64_finalize,
 * instead of using an unrolled jump table/if statement loop.
 *
 * This is automatically defined with -Os/-Oz on GCC and Clang. */
#ifndef XXH_REROLL
#  if defined(__OPTIMIZE_SIZE__)
#    define XXH_REROLL 1
#  else
#    define XXH_REROLL 0
#  endif
#endif

/* *************************************
*  Includes & Memory related functions
***************************************/
/*! Modify the local functions below should you wish to use some other memory routines
*   for malloc(), free() */
#include <stdlib.h>
static void* XXH_malloc(size_t s) { return malloc(s); }
static void  XXH_free  (void* p)  { free(p); }
/*! and for memcpy() */
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }

#include <limits.h>   /* ULLONG_MAX */

#define XXH_STATIC_LINKING_ONLY
#include "xxhash.h"

/* *************************************
*  Compiler Specific Options
***************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
#  define XXH_FORCE_INLINE static __forceinline
#  define XXH_NO_INLINE static __declspec(noinline)
#else
#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#    ifdef __GNUC__
#      define XXH_FORCE_INLINE static inline __attribute__((always_inline))
#      define XXH_NO_INLINE static __attribute__((noinline))
#    else
#      define XXH_FORCE_INLINE static inline
#      define XXH_NO_INLINE static
#    endif
#  else
#    define XXH_FORCE_INLINE static
#    define XXH_NO_INLINE static
#  endif /* __STDC_VERSION__ */
#endif



/* *************************************
*  Debug
***************************************/
/* DEBUGLEVEL is expected to be defined externally,
 * typically through the compiler command line.
 * Value must be a number. */
#ifndef DEBUGLEVEL
#  define DEBUGLEVEL 0
#endif

#if (DEBUGLEVEL>=1)
#  include <assert.h>   /* note : can still be disabled with NDEBUG */
#  define XXH_ASSERT(c)   assert(c)
#else
#  define XXH_ASSERT(c)   ((void)0)
#endif

/* note : use after variable declarations */
#define XXH_STATIC_ASSERT(c)  { enum { XXH_sa = 1/(int)(!!(c)) }; }


/* *************************************
*  Basic Types
***************************************/
#ifndef MEM_MODULE
# if !defined (__VMS) \
   && (defined (__cplusplus) \
   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint8_t  BYTE;
    typedef uint16_t U16;
    typedef uint32_t U32;
# else
    typedef unsigned char  BYTE;
    typedef unsigned short U16;
    typedef unsigned int   U32;
# endif
#endif


/* === Memory access === */

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* __packed accesses are safer, but compiler-specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; } __attribute__((packed)) unalign;
static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }

#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */
static U32 XXH_read32(const void* memPtr)
{
    U32 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_MEMORY_ACCESS */


/* === Endianness === */
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;

/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
static int XXH_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
}
#  define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
#endif




/* ****************************************
*  Compiler-specific Functions and Macros
******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifndef __has_builtin
#  define __has_builtin(x) 0
#endif

#if !defined(NO_CLANG_BUILTIN) && __has_builtin(__builtin_rotateleft32) && __has_builtin(__builtin_rotateleft64)
#  define XXH_rotl32 __builtin_rotateleft32
#  define XXH_rotl64 __builtin_rotateleft64
/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
#elif defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
static U32 XXH_swap32 (U32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
#endif


/* ***************************
*  Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

XXH_FORCE_INLINE U32 XXH_readLE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}

static U32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}

XXH_FORCE_INLINE U32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned) {
        return XXH_readLE32(ptr);
    } else {
        return XXH_CPU_LITTLE_ENDIAN ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
    }
}


/* *************************************
*  Misc
***************************************/
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* *******************************************************************
*  32-bit hash functions
*********************************************************************/
static const U32 PRIME32_1 = 0x9E3779B1U;   /* 0b10011110001101110111100110110001 */
static const U32 PRIME32_2 = 0x85EBCA77U;   /* 0b10000101111010111100101001110111 */
static const U32 PRIME32_3 = 0xC2B2AE3DU;   /* 0b11000010101100101010111000111101 */
static const U32 PRIME32_4 = 0x27D4EB2FU;   /* 0b00100111110101001110101100101111 */
static const U32 PRIME32_5 = 0x165667B1U;   /* 0b00010110010101100110011110110001 */

static U32 XXH32_round(U32 acc, U32 input)
{
    acc += input * PRIME32_2;
    acc  = XXH_rotl32(acc, 13);
    acc *= PRIME32_1;
#if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE)
    /* UGLY HACK:
     * This inline assembly hack forces acc into a normal register. This is the
     * only thing that prevents GCC and Clang from autovectorizing the XXH32 loop
     * (pragmas and attributes don't work for some reason) without globally
     * disabling SSE4.1.
     *
     * We want to avoid vectorization because, despite working on
     * 4 integers at a time, there are multiple factors slowing XXH32 down on
     * SSE4:
     * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on newer chips!)
     *   making it slightly slower to multiply four integers at once compared to four
     *   integers independently. Even on Sandy/Ivy Bridge, where pmulld was fastest, it is
     *   still not worth going into SSE just to multiply unless doing a long operation.
     *
     * - Four instructions are required to rotate,
     *      movdqa  tmp, v    // not required with VEX encoding
     *      pslld   tmp, 13   // tmp <<= 13
     *      psrld   v,   19   // v >>= 19
     *      por     v,   tmp  // v |= tmp
     *   compared to one for scalar:
     *      roll    v, 13     // reliably fast across the board
     *      shldl   v, v, 13  // Sandy Bridge and later prefer this for some reason
     *
     * - Instruction level parallelism is actually more beneficial here because the
     *   SIMD actually serializes this operation: While v1 is rotating, v2 can load data,
     *   while v3 can multiply. SSE forces them to operate together.
     *
     * How this hack works:
     *   __asm__(""       // Declare an assembly block but don't declare any instructions
     *   :                // However, as an Input/Output Operand,
     *   "+r"             // constrain a read/write operand (+) as a general purpose register (r).
     *   (acc)            // and set acc as the operand
     *   );
     *
     * Because of the 'r', the compiler has promised that acc will be in a
     * general purpose register and the '+' says that it will be 'read/write',
     * so it has to assume it has changed. It is like volatile without all the
     * loads and stores.
     *
     * Since the argument has to be in a normal register (not an SSE register),
     * each time XXH32_round is called, it is impossible to vectorize. */
    __asm__("" : "+r" (acc));
#endif
    return acc;
}

/* mix all bits */
static U32 XXH32_avalanche(U32 h32)
{
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;
    return(h32);
}

#define XXH_get32bits(p) XXH_readLE32_align(p, align)

static U32
XXH32_finalize(U32 h32, const void* ptr, size_t len, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)ptr;

#define PROCESS1               \
    h32 += (*p++) * PRIME32_5; \
    h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;

#define PROCESS4                         \
    h32 += XXH_get32bits(p) * PRIME32_3; \
    p+=4;                                \
    h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;

    /* Compact rerolled version */
    if (XXH_REROLL) {
        len &= 15;
        while (len >= 4) {
            PROCESS4;
            len -= 4;
        }
        while (len > 0) {
            PROCESS1;
            --len;
        }
        return XXH32_avalanche(h32);
    } else {
        switch(len&15) /* or switch(bEnd - p) */ {
          case 12: PROCESS4;
                   /* fallthrough */
          case 8:  PROCESS4;
                   /* fallthrough */
          case 4:  PROCESS4;
                   return XXH32_avalanche(h32);

          case 13: PROCESS4;
                   /* fallthrough */
          case 9:  PROCESS4;
                   /* fallthrough */
          case 5:  PROCESS4;
                   PROCESS1;
                   return XXH32_avalanche(h32);

          case 14: PROCESS4;
                   /* fallthrough */
          case 10: PROCESS4;
                   /* fallthrough */
          case 6:  PROCESS4;
                   PROCESS1;
                   PROCESS1;
                   return XXH32_avalanche(h32);

          case 15: PROCESS4;
                   /* fallthrough */
          case 11: PROCESS4;
                   /* fallthrough */
          case 7:  PROCESS4;
                   /* fallthrough */
          case 3:  PROCESS1;
                   /* fallthrough */
          case 2:  PROCESS1;
                   /* fallthrough */
          case 1:  PROCESS1;
                   /* fallthrough */
          case 0:  return XXH32_avalanche(h32);
        }
        XXH_ASSERT(0);
        return h32;   /* reaching this point is deemed impossible */
    }
}

XXH_FORCE_INLINE U32
XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;

#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16) {
        const BYTE* const limit = bEnd - 15;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
        } while (p < limit);

        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32 = seed + PRIME32_5;
    }

    h32 += (U32)len;

    return XXH32_finalize(h32, p, len&15, align);
}


XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, input, len);
    return XXH32_digest(&state);

#else

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            return XXH32_endian_align(input, len, seed, XXH_aligned);
    }   }

    return XXH32_endian_align(input, len, seed, XXH_unaligned);
#endif
}
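/* Usage sketch (illustrative, not part of the original file) : the one-shot API
 * hashes a complete buffer in a single call. `buffer`, `size` and the seed 0 are
 * example values only.
 *
 *     unsigned int const h = XXH32(buffer, size, 0);
 *
 * The same input, length and seed always produce the same 32-bit value,
 * regardless of platform endianness. */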



/*====== Hash streaming ======*/
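/* Usage sketch (illustrative, not part of the original file) : the streaming API
 * produces the same result as the one-shot XXH32() while consuming the input in
 * arbitrary chunks. `chunk`, `chunkSize`, `more_input` and the seed 0 are example
 * values only.
 *
 *     XXH32_state_t* const st = XXH32_createState();
 *     XXH32_reset(st, 0);
 *     while (more_input) XXH32_update(st, chunk, chunkSize);
 *     {   unsigned int const h = XXH32_digest(st);
 *         XXH32_freeState(st);
 *     }
 */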

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state));
    state.v1 = seed + PRIME32_1 + PRIME32_2;
    state.v2 = seed + PRIME32_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME32_1;
    /* do not write into reserved, planned to be removed in a future version */
    memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
    return XXH_OK;
}


XXH_PUBLIC_API XXH_errorcode
XXH32_update(XXH32_state_t* state, const void* input, size_t len)
{
    if (input==NULL)
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
        return XXH_OK;
#else
        return XXH_ERROR;
#endif

    {   const BYTE* p = (const BYTE*)input;
        const BYTE* const bEnd = p + len;

        state->total_len_32 += (XXH32_hash_t)len;
        state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));

        if (state->memsize + len < 16) {   /* fill in tmp buffer */
            XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
            state->memsize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* some data left from previous update */
            XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
            {   const U32* p32 = state->mem32;
                state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++;
                state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++;
                state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++;
                state->v4 = XXH32_round(state->v4, XXH_readLE32(p32));
            }
            p += 16-state->memsize;
            state->memsize = 0;
        }

        if (p <= bEnd-16) {
            const BYTE* const limit = bEnd - 16;
            U32 v1 = state->v1;
            U32 v2 = state->v2;
            U32 v3 = state->v3;
            U32 v4 = state->v4;

            do {
                v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4;
                v2 = XXH32_round(v2, XXH_readLE32(p)); p+=4;
                v3 = XXH32_round(v3, XXH_readLE32(p)); p+=4;
                v4 = XXH32_round(v4, XXH_readLE32(p)); p+=4;
            } while (p<=limit);

            state->v1 = v1;
            state->v2 = v2;
            state->v3 = v3;
            state->v4 = v4;
        }

        if (p < bEnd) {
            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }

    return XXH_OK;
}


XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state)
{
    U32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v1, 1)
            + XXH_rotl32(state->v2, 7)
            + XXH_rotl32(state->v3, 12)
            + XXH_rotl32(state->v4, 18);
    } else {
        h32 = state->v3 /* == seed */ + PRIME32_5;
    }

    h32 += state->total_len_32;

    return XXH32_finalize(h32, state->mem32, state->memsize, XXH_aligned);
}


/*====== Canonical representation ======*/

/*! Default XXH result types are basic unsigned 32 and 64 bits.
 *  The canonical representation follows the human-readable write convention, aka big-endian (most significant digits first).
 *  These functions allow transformation of a hash result into and from its canonical format.
 *  This way, hash values can be written into a file or buffer, remaining comparable across different systems.
 */
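/* Example (illustrative, not part of the original file) : serialize a hash in
 * canonical (big-endian) form and read it back. `h` is an example value only.
 *
 *     XXH32_canonical_t c;
 *     XXH32_canonicalFromHash(&c, h);                        // c holds big-endian bytes
 *     XXH32_hash_t const h2 = XXH32_hashFromCanonical(&c);   // h2 == h on any platform
 */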

XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}


#ifndef XXH_NO_LONG_LONG

/* *******************************************************************
*  64-bit hash functions
*********************************************************************/

/*====== Memory access ======*/

#ifndef MEM_MODULE
# define MEM_MODULE
# if !defined (__VMS) \
   && (defined (__cplusplus) \
   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint64_t U64;
# else
    /* if compiler doesn't support unsigned long long, replace by another 64-bit type */
    typedef unsigned long long U64;
# endif
#endif

/*! XXH_REROLL_XXH64:
 * Whether to reroll the XXH64_finalize() loop.
 *
 * Just like XXH32, we can unroll the XXH64_finalize() loop. This can be a performance gain
 * on 64-bit hosts, as only one jump is required.
 *
 * However, on 32-bit hosts, because arithmetic needs to be done with two 32-bit registers,
 * and 64-bit arithmetic needs to be simulated, it isn't beneficial to unroll. The code becomes
 * ridiculously large (the largest function in the binary on i386!), and rerolling it saves
 * anywhere from 3kB to 20kB. It is also slightly faster because it fits into cache better
 * and is more likely to be inlined by the compiler.
 *
 * If XXH_REROLL is defined, this is ignored and the loop is always rerolled. */
#ifndef XXH_REROLL_XXH64
#  if (defined(__ILP32__) || defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \
     || !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) /* x86-64 */ \
        || defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) /* aarch64 */ \
        || defined(__PPC64__) || defined(__PPC64LE__) || defined(__ppc64__) || defined(__powerpc64__) /* ppc64 */ \
        || defined(__mips64__) || defined(__mips64)) /* mips64 */ \
     || (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX) /* check limits */
#    define XXH_REROLL_XXH64 1
#  else
#    define XXH_REROLL_XXH64 0
#  endif
#endif /* !defined(XXH_REROLL_XXH64) */

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* __packed accesses are safer, but compiler-specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }

#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */

static U64 XXH_read64(const void* memPtr)
{
    U64 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_MEMORY_ACCESS */

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap64 _byteswap_uint64
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap64 __builtin_bswap64
#else
static U64 XXH_swap64 (U64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x << 8)  & 0x000000ff00000000ULL) |
            ((x >> 8)  & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif

XXH_FORCE_INLINE U64 XXH_readLE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
}

static U64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}

XXH_FORCE_INLINE U64
XXH_readLE64_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return XXH_readLE64(ptr);
    else
        return XXH_CPU_LITTLE_ENDIAN ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}


/*====== xxh64 ======*/

static const U64 PRIME64_1 = 0x9E3779B185EBCA87ULL;   /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
static const U64 PRIME64_2 = 0xC2B2AE3D27D4EB4FULL;   /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
static const U64 PRIME64_3 = 0x165667B19E3779F9ULL;   /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
static const U64 PRIME64_4 = 0x85EBCA77C2B2AE63ULL;   /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
static const U64 PRIME64_5 = 0x27D4EB2F165667C5ULL;   /* 0b0010011111010100111010110010111100010110010101100110011111000101 */

static U64 XXH64_round(U64 acc, U64 input)
{
    acc += input * PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= PRIME64_1;
    return acc;
}

static U64 XXH64_mergeRound(U64 acc, U64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * PRIME64_1 + PRIME64_4;
    return acc;
}

static U64 XXH64_avalanche(U64 h64)
{
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;
    return h64;
}


#define XXH_get64bits(p) XXH_readLE64_align(p, align)

static U64
XXH64_finalize(U64 h64, const void* ptr, size_t len, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)ptr;

#define PROCESS1_64            \
    h64 ^= (*p++) * PRIME64_5; \
    h64 = XXH_rotl64(h64, 11) * PRIME64_1;

#define PROCESS4_64                                 \
    h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;     \
    p+=4;                                           \
    h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;

#define PROCESS8_64 {                                    \
    U64 const k1 = XXH64_round(0, XXH_get64bits(p));     \
    p+=8;                                                \
    h64 ^= k1;                                           \
    h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;   \
}

    /* Rerolled version for 32-bit targets is faster and much smaller. */
    if (XXH_REROLL || XXH_REROLL_XXH64) {
        len &= 31;
        while (len >= 8) {
            PROCESS8_64;
            len -= 8;
        }
        if (len >= 4) {
            PROCESS4_64;
            len -= 4;
        }
        while (len > 0) {
            PROCESS1_64;
            --len;
        }
        return XXH64_avalanche(h64);
    } else {
        switch(len & 31) {
          case 24: PROCESS8_64;
                   /* fallthrough */
          case 16: PROCESS8_64;
                   /* fallthrough */
          case  8: PROCESS8_64;
                   return XXH64_avalanche(h64);

          case 28: PROCESS8_64;
                   /* fallthrough */
          case 20: PROCESS8_64;
                   /* fallthrough */
          case 12: PROCESS8_64;
                   /* fallthrough */
          case  4: PROCESS4_64;
                   return XXH64_avalanche(h64);

          case 25: PROCESS8_64;
                   /* fallthrough */
          case 17: PROCESS8_64;
                   /* fallthrough */
          case  9: PROCESS8_64;
                   PROCESS1_64;
                   return XXH64_avalanche(h64);

          case 29: PROCESS8_64;
                   /* fallthrough */
          case 21: PROCESS8_64;
                   /* fallthrough */
          case 13: PROCESS8_64;
                   /* fallthrough */
          case  5: PROCESS4_64;
                   PROCESS1_64;
                   return XXH64_avalanche(h64);

          case 26: PROCESS8_64;
                   /* fallthrough */
          case 18: PROCESS8_64;
                   /* fallthrough */
          case 10: PROCESS8_64;
                   PROCESS1_64;
                   PROCESS1_64;
                   return XXH64_avalanche(h64);

          case 30: PROCESS8_64;
                   /* fallthrough */
          case 22: PROCESS8_64;
                   /* fallthrough */
          case 14: PROCESS8_64;
                   /* fallthrough */
          case  6: PROCESS4_64;
                   PROCESS1_64;
                   PROCESS1_64;
                   return XXH64_avalanche(h64);

          case 27: PROCESS8_64;
                   /* fallthrough */
          case 19: PROCESS8_64;
                   /* fallthrough */
          case 11: PROCESS8_64;
                   PROCESS1_64;
                   PROCESS1_64;
                   PROCESS1_64;
                   return XXH64_avalanche(h64);

          case 31: PROCESS8_64;
                   /* fallthrough */
          case 23: PROCESS8_64;
                   /* fallthrough */
          case 15: PROCESS8_64;
                   /* fallthrough */
          case  7: PROCESS4_64;
                   /* fallthrough */
          case  3: PROCESS1_64;
                   /* fallthrough */
          case  2: PROCESS1_64;
                   /* fallthrough */
          case  1: PROCESS1_64;
                   /* fallthrough */
          case  0: return XXH64_avalanche(h64);
        }
    }
    /* impossible to reach */
    XXH_ASSERT(0);
    return 0;   /* unreachable, but some compilers complain without it */
}

XXH_FORCE_INLINE U64
XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U64 h64;

#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
        } while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64 = seed + PRIME64_5;
    }

    h64 += (U64) len;

    return XXH64_finalize(h64, p, len, align);
}


XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, input, len);
    return XXH64_digest(&state);

#else

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {   /* Input is aligned, let's leverage the speed advantage */
            return XXH64_endian_align(input, len, seed, XXH_aligned);
    }   }

    return XXH64_endian_align(input, len, seed, XXH_unaligned);

#endif
}
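/* Usage sketch (illustrative, not part of the original file) : same pattern as XXH32,
 * but with a 64-bit result and a 64-bit seed. `buffer`, `size` and the seed are
 * example values only.
 *
 *     XXH64_hash_t const h = XXH64(buffer, size, 0xCAFEULL);
 */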

/*====== Hash Streaming ======*/

XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state));
    state.v1 = seed + PRIME64_1 + PRIME64_2;
    state.v2 = seed + PRIME64_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME64_1;
    /* do not write into reserved, might be removed in a future version */
    memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode
XXH64_update (XXH64_state_t* state, const void* input, size_t len)
{
    if (input==NULL)
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
        return XXH_OK;
#else
        return XXH_ERROR;
#endif

    {   const BYTE* p = (const BYTE*)input;
        const BYTE* const bEnd = p + len;

        state->total_len += len;

        if (state->memsize + len < 32) {   /* fill in tmp buffer */
            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
            state->memsize += (U32)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* tmp buffer is full */
            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
            state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0));
            state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1));
            state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2));
            state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3));
            p += 32-state->memsize;
            state->memsize = 0;
        }

        if (p+32 <= bEnd) {
            const BYTE* const limit = bEnd - 32;
            U64 v1 = state->v1;
            U64 v2 = state->v2;
            U64 v3 = state->v3;
            U64 v4 = state->v4;

            do {
                v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8;
                v2 = XXH64_round(v2, XXH_readLE64(p)); p+=8;
                v3 = XXH64_round(v3, XXH_readLE64(p)); p+=8;
                v4 = XXH64_round(v4, XXH_readLE64(p)); p+=8;
            } while (p<=limit);

            state->v1 = v1;
            state->v2 = v2;
            state->v3 = v3;
            state->v4 = v4;
        }

        if (p < bEnd) {
            XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }

    return XXH_OK;
}


XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state)
{
    U64 h64;

    if (state->total_len >= 32) {
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64 = state->v3 /*seed*/ + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, XXH_aligned);
}


/*====== Canonical representation ======*/

XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}



/* *********************************************************************
*  XXH3
*  New generation hash designed for speed on small keys and vectorization
************************************************************************ */

#include "arrow/vendored/xxhash/xxh3.h"


#endif  /* XXH_NO_LONG_LONG */