/*
 * xxHash - Extremely Fast Hash algorithm
 * Header File
 * Copyright (C) 2012-2020 Yann Collet
 *
 * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other materials provided with the
 *      distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at:
 *   - xxHash homepage: https://www.xxhash.com
 *   - xxHash source repository: https://github.com/Cyan4973/xxHash
 */
/*!
 * @mainpage xxHash
 *
 * @file xxhash.h
 * xxHash prototypes and implementation
 */
/* TODO: update */
/* Notice extracted from xxHash homepage:

xxHash is an extremely fast hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.

Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.

Note: SMHasher's CRC32 implementation is not the fastest one.
Other speed-oriented implementations can be faster,
especially in combination with PCLMUL instruction:
https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735

A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/

#if defined (__cplusplus)
extern "C" {
#endif

/* ****************************
 *  INLINE mode
 ******************************/
/*!
 * XXH_INLINE_ALL (and XXH_PRIVATE_API)
 * Use these build macros to inline xxhash into the target unit.
 * Inlining improves performance on small inputs, especially when the length is
 * expressed as a compile-time constant:
 *
 *  https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
 *
 * It also keeps xxHash symbols private to the unit, so they are not exported.
 *
 * Usage:
 *     #define XXH_INLINE_ALL
 *     #include "xxhash.h"
 *
 * Do not compile and link xxhash.o as a separate object, as it is not useful.
 */
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
    && !defined(XXH_INLINE_ALL_31684351384)
   /* this section should be traversed only once */
#  define XXH_INLINE_ALL_31684351384
   /* give access to the advanced API, required to compile implementations */
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
#  define XXH_STATIC_LINKING_ONLY
   /* make all functions private */
#  undef XXH_PUBLIC_API
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* note: this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif

   /*
    * This part deals with the special case where a unit wants to inline xxHash,
    * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
    * such as part of some previously included *.h header file.
    * Without further action, the new include would just be ignored,
    * and functions would effectively _not_ be inlined (silent failure).
    * The following macros solve this situation by prefixing all inlined names,
    * avoiding naming collision with previous inclusions.
    */
   /* Before that, we unconditionally #undef all symbols,
    * in case they were already defined with XXH_NAMESPACE.
    * They will then be redefined for XXH_INLINE_ALL
    */
#  undef XXH_versionNumber
   /* XXH32 */
#  undef XXH32
#  undef XXH32_createState
#  undef XXH32_freeState
#  undef XXH32_reset
#  undef XXH32_update
#  undef XXH32_digest
#  undef XXH32_copyState
#  undef XXH32_canonicalFromHash
#  undef XXH32_hashFromCanonical
   /* XXH64 */
#  undef XXH64
#  undef XXH64_createState
#  undef XXH64_freeState
#  undef XXH64_reset
#  undef XXH64_update
#  undef XXH64_digest
#  undef XXH64_copyState
#  undef XXH64_canonicalFromHash
#  undef XXH64_hashFromCanonical
   /* XXH3_64bits */
#  undef XXH3_64bits
#  undef XXH3_64bits_withSecret
#  undef XXH3_64bits_withSeed
#  undef XXH3_64bits_withSecretandSeed
#  undef XXH3_createState
#  undef XXH3_freeState
#  undef XXH3_copyState
#  undef XXH3_64bits_reset
#  undef XXH3_64bits_reset_withSeed
#  undef XXH3_64bits_reset_withSecret
#  undef XXH3_64bits_update
#  undef XXH3_64bits_digest
#  undef XXH3_generateSecret
   /* XXH3_128bits */
#  undef XXH128
#  undef XXH3_128bits
#  undef XXH3_128bits_withSeed
#  undef XXH3_128bits_withSecret
#  undef XXH3_128bits_reset
#  undef XXH3_128bits_reset_withSeed
#  undef XXH3_128bits_reset_withSecret
#  undef XXH3_128bits_reset_withSecretandSeed
#  undef XXH3_128bits_update
#  undef XXH3_128bits_digest
#  undef XXH128_isEqual
#  undef XXH128_cmp
#  undef XXH128_canonicalFromHash
#  undef XXH128_hashFromCanonical
   /* Finally, free the namespace itself */
#  undef XXH_NAMESPACE

   /* employ the namespace for XXH_INLINE_ALL */
#  define XXH_NAMESPACE XXH_INLINE_
   /*
    * Some identifiers (enums, type names) are not symbols,
    * but they must nonetheless be renamed to avoid redeclaration.
    * Alternative solution: do not redeclare them.
    * However, this requires some #ifdefs, and has a more dispersed impact.
    * Meanwhile, renaming can be achieved in a single place.
    */
#  define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH32_canonical_t  XXH_IPREF(XXH32_canonical_t)
#  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
#  define XXH3_state_s  XXH_IPREF(XXH3_state_s)
#  define XXH3_state_t  XXH_IPREF(XXH3_state_t)
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */



/* ****************************************************************
 *  Stable API
 *****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1


/*!
 * @defgroup public Public API
 * Contains details on the public xxHash functions.
 * @{
 */
/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif XXH_IMPORT
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif

#ifdef XXH_DOXYGEN
/*!
 * @brief Emulate a namespace by transparently prefixing all symbols.
 *
 * If you want to include _and expose_ xxHash functions from within your own
 * library, but also want to avoid symbol collisions with other libraries which
 * may also include xxHash, you can use XXH_NAMESPACE to automatically prefix
 * any public symbol from xxhash library with the value of XXH_NAMESPACE
 * (therefore, avoid empty or numeric values).
 *
 * Note that no change is required within the calling program as long as it
 * includes `xxhash.h`: Regular symbol names will be automatically translated
 * by this header.
 */
#  define XXH_NAMESPACE /* YOUR NAME HERE */
#  undef XXH_NAMESPACE
#endif

#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
#  define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
#  define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
#  define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
/* XXH3_128bits */
#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
#  define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
#  define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif
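
/*
 * Illustrative sketch (not part of the official documentation): building a
 * library with a namespace prefix. The `MYLIB_` value below is a hypothetical
 * example; any non-empty, non-numeric identifier works.
 *
 *     // compile the implementation with: cc -DXXH_NAMESPACE=MYLIB_ -c xxhash.c
 *     #include <stdio.h>
 *     #include "xxhash.h"   // with the same -DXXH_NAMESPACE=MYLIB_ flag
 *
 *     int main(void)
 *     {
 *         // The source keeps using the regular name XXH32(); the header
 *         // transparently links it against the exported symbol MYLIB_XXH32.
 *         printf("%08x\n", (unsigned)XXH32("abc", 3, 0));
 *         return 0;
 *     }
 */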


/* *************************************
 *  Version
 ***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  1
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)

/*!
 * @brief Obtains the xxHash version.
 *
 * This is mostly useful when xxHash is compiled as a shared library,
 * since the returned value comes from the library, as opposed to the header file.
 *
 * @return `XXH_VERSION_NUMBER` of the invoked library.
 */
XXH_PUBLIC_API unsigned XXH_versionNumber (void);
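
/*
 * For example, the macros above encode version 0.8.1 as 801. A minimal sketch
 * of a runtime check against the header's own version number:
 *
 *     #include <assert.h>
 *     #include "xxhash.h"
 *
 *     void checkXXHashVersion(void)
 *     {
 *         // Detects a mismatch between the linked library and this header,
 *         // which can happen with dynamic linking.
 *         assert(XXH_versionNumber() == XXH_VERSION_NUMBER);
 *     }
 */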


/* ****************************
 *  Common basic types
 ******************************/
#include <stddef.h>   /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;


/*-**********************************************************************
 *  32-bit hash
 ************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
 * @brief An unsigned 32-bit integer.
 *
 * Not necessarily defined to `uint32_t` but functionally equivalent.
 */
typedef uint32_t XXH32_hash_t;

#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint32_t XXH32_hash_t;

#else
#  include <limits.h>
#  if UINT_MAX == 0xFFFFFFFFUL
     typedef unsigned int XXH32_hash_t;
#  else
#    if ULONG_MAX == 0xFFFFFFFFUL
       typedef unsigned long XXH32_hash_t;
#    else
#      error "unsupported platform: need a 32-bit type"
#    endif
#  endif
#endif

/*!
 * @}
 *
 * @defgroup xxh32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
 *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
 *   Note that @ref xxh3_family provides competitive speed
 *   for both 32-bit and 64-bit systems, and offers true 64/128 bit hash results.
 *
 * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families
 * @see @ref xxh32_impl for implementation details
 * @{
 */

/*!
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
 *
 * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 32-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 32-bit hash value.
 *
 * @see
 *    XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
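
/*
 * One-shot usage (a minimal sketch; the helper name and seed value are
 * arbitrary examples, not part of the library):
 *
 *     #include <string.h>
 *     #include "xxhash.h"
 *
 *     XXH32_hash_t hashString(const char* str)
 *     {
 *         // A single call hashes the whole buffer; seed 0 is the common default.
 *         return XXH32(str, strlen(str), 0);
 *     }
 */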

/*!
 * Streaming functions generate the xxHash value from an incremental input.
 * This method is slower than single-call functions, due to state management.
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
 *
 * An XXH state must first be allocated using `XXH*_createState()`.
 *
 * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
 *
 * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
 *
 * The function returns an error code, with 0 meaning OK, and any other value
 * meaning there is an error.
 *
 * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
 * This function returns the nn-bit hash as an int or long long.
 *
 * It's still possible to continue inserting input into the hash state after a
 * digest, and generate new hash values later on by invoking `XXH*_digest()`.
 *
 * When done, release the state using `XXH*_freeState()`.
 *
 * Example code for incrementally hashing a file:
 * @code{.c}
 *   #include <assert.h>
 *   #include <stdio.h>
 *   #include <xxhash.h>
 *   #define BUFFER_SIZE 256
 *
 *   // Note: XXH64 and XXH3 use the same interface.
 *   XXH32_hash_t
 *   hashFile(FILE* stream)
 *   {
 *       XXH32_state_t* state;
 *       unsigned char buf[BUFFER_SIZE];
 *       size_t amt;
 *       XXH32_hash_t hash;
 *
 *       state = XXH32_createState();       // Create a state
 *       assert(state != NULL);             // Error check here
 *       XXH32_reset(state, 0xbaad5eed);    // Reset state with our seed
 *       while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) {
 *           XXH32_update(state, buf, amt); // Hash the file in chunks
 *       }
 *       hash = XXH32_digest(state);        // Finalize the hash
 *       XXH32_freeState(state);            // Clean up
 *       return hash;
 *   }
 * @endcode
 */

/*!
 * @typedef struct XXH32_state_s XXH32_state_t
 * @brief The opaque state struct for the XXH32 streaming API.
 *
 * @see XXH32_state_s for details.
 */
typedef struct XXH32_state_s XXH32_state_t;

/*!
 * @brief Allocates an @ref XXH32_state_t.
 *
 * Must be freed with XXH32_freeState().
 * @return An allocated XXH32_state_t on success, `NULL` on failure.
 */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
/*!
 * @brief Frees an @ref XXH32_state_t.
 *
 * Must be allocated with XXH32_createState().
 * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
 * @return XXH_OK.
 */
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
/*!
 * @brief Copies one @ref XXH32_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

/*!
 * @brief Resets an @ref XXH32_state_t to begin a new hash.
 *
 * This function resets and seeds a state. Call it before @ref XXH32_update().
 *
 * @param statePtr The state struct to reset.
 * @param seed The 32-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, XXH32_hash_t seed);

/*!
 * @brief Consumes a block of @p input to an @ref XXH32_state_t.
 *
 * Call this to incrementally consume blocks of data.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH32_state_t.
 *
 * @note
 *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return The calculated xxHash32 value from that state.
 */
XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);
/*******   Canonical representation   *******/

/*
 * The default return values from XXH functions are unsigned 32 and 64 bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of byte-level ordering,
 * since little and big endian conventions will store the same number differently.
 *
 * The canonical representation settles this issue by mandating big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 */

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
 */
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;

/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
 * @param dst The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);

/*!
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
 *
 * @param src The @ref XXH32_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
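
/*
 * Serialization sketch: writing a hash in canonical (big endian) form, e.g.
 * to disk or a socket. fwrite() is one possible sink; the helper is a
 * hypothetical example.
 *
 *     #include <stdio.h>
 *     #include "xxhash.h"
 *
 *     int storeHash(FILE* out, XXH32_hash_t hash)
 *     {
 *         XXH32_canonical_t canonical;
 *         XXH32_canonicalFromHash(&canonical, hash);  // native -> big endian
 *         return fwrite(canonical.digest, sizeof(canonical.digest), 1, out) == 1;
 *     }
 *
 * The reading side converts back with XXH32_hashFromCanonical(), so both sides
 * agree regardless of platform byte order.
 */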


#ifdef __has_attribute
#  define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
#  define XXH_HAS_ATTRIBUTE(x) 0
#endif

/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
#  define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
#  define XXH_HAS_C_ATTRIBUTE(x) 0
#endif

#if defined(__cplusplus) && defined(__has_cpp_attribute)
#  define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
#  define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif

/*
 * Define XXH_FALLTHROUGH macro for annotating switch cases with the
 * 'fallthrough' attribute introduced in C++17 and C23.
 * C++17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
 * C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
 */
#if XXH_HAS_C_ATTRIBUTE(fallthrough)
#  define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_CPP_ATTRIBUTE(fallthrough)
#  define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
#  define XXH_FALLTHROUGH __attribute__ ((fallthrough))
#else
#  define XXH_FALLTHROUGH
#endif
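
/*
 * Typical use of XXH_FALLTHROUGH in a switch (an illustrative sketch): the
 * annotation silences implicit-fallthrough warnings where the fallthrough is
 * intentional, and expands to nothing on compilers without the attribute.
 *
 *     switch (len) {
 *     case 3: acc += input[2];
 *             XXH_FALLTHROUGH;  // deliberate fallthrough to case 2
 *     case 2: acc += input[1];
 *             XXH_FALLTHROUGH;  // deliberate fallthrough to case 1
 *     case 1: acc += input[0];
 *     }
 */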

/*!
 * @}
 * @ingroup public
 * @{
 */

#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
 *  64-bit hash
 ************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
 * @brief An unsigned 64-bit integer.
 *
 * Not necessarily defined to `uint64_t` but functionally equivalent.
 */
typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint64_t XXH64_hash_t;
#else
#  include <limits.h>
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
     /* LP64 ABI says uint64_t is unsigned long */
     typedef unsigned long XXH64_hash_t;
#  else
     /* the following type must have a width of 64-bit */
     typedef unsigned long long XXH64_hash_t;
#  endif
#endif

/*!
 * @}
 *
 * @defgroup xxh64_family XXH64 family
 * @ingroup public
 * @{
 * Contains functions used in the classic 64-bit xxHash algorithm.
 *
 * @note
 *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
 *   and offers true 64/128 bit hash results.
 *   It provides better speed for systems with vector processing capabilities.
 */


/*!
 * @brief Calculates the 64-bit hash of @p input using xxHash64.
 *
 * This function usually runs faster on 64-bit systems, but slower on 32-bit
 * systems (see benchmark).
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 64-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit hash.
 *
 * @see
 *    XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed);

/*******   Streaming   *******/
/*!
 * @brief The opaque state struct for the XXH64 streaming API.
 *
 * @see XXH64_state_s for details.
 */
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);

/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);

/*!
 * @}
 * ************************************************************************
 * @defgroup xxh3_family XXH3 family
 * @ingroup public
 * @{
 *
 * XXH3 is a more recent hash algorithm featuring:
 *  - Improved speed for both small and large inputs
 *  - True 64-bit and 128-bit outputs
 *  - SIMD acceleration
 *  - Improved 32-bit viability
 *
 * Speed analysis methodology is explained here:
 *
 *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
 *
 * Compared to XXH64, expect XXH3 to run approximately
 * ~2x faster on large inputs and >3x faster on small ones,
 * though exact differences vary depending on the platform.
 *
 * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
 * but it does not require them.
 * Any 32-bit and 64-bit targets that can run XXH32 smoothly
 * can run XXH3 at competitive speeds, even without vector support.
 * Further details are explained in the implementation.
 *
 * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
 * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro.
 *
 * XXH3 implementation is portable:
 * it has a generic C90 formulation that can be compiled on any platform,
 * and all implementations generate exactly the same hash value on all platforms.
 * Starting from v0.8.0, it's also labelled "stable", meaning that
 * any future version will also generate the same hash value.
 *
 * XXH3 offers 2 variants, _64bits and _128bits.
 *
 * When only 64 bits are needed, prefer invoking the _64bits variant, as it
 * reduces the amount of mixing, resulting in faster speed on small inputs.
 * It's also generally simpler to manipulate a scalar return type than a struct.
 *
 * The API supports one-shot hashing, streaming mode, and custom secrets.
 */

/*-**********************************************************************
 *  XXH3 64-bit variant
 ************************************************************************/

/* XXH3_64bits():
 * default 64-bit variant, using default secret and default seed of 0.
 * It's the fastest variant. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);

/*
 * XXH3_64bits_withSeed():
 * This variant generates a custom secret on the fly
 * based on default secret altered using the `seed` value.
 * While this operation is decently fast, note that it's not completely free.
 * Note: seed==0 produces the same results as XXH3_64bits().
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
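
/*
 * A short sketch of the default and seeded one-shot forms (helper names and
 * the seed constant are arbitrary examples):
 *
 *     #include "xxhash.h"
 *
 *     XXH64_hash_t hashFast(const void* data, size_t len)
 *     {
 *         return XXH3_64bits(data, len);   // default secret, seed of 0
 *     }
 *
 *     XXH64_hash_t hashSeeded(const void* data, size_t len)
 *     {
 *         // Any 64-bit value works; the same seed reproduces the same hash.
 *         return XXH3_64bits_withSeed(data, len, 0x9E3779B185EBCA87ULL);
 *     }
 */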

/*!
 * The bare minimum size for a custom secret.
 *
 * @see
 *  XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
 *  XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
 */
#define XXH3_SECRET_SIZE_MIN 136

/*
 * XXH3_64bits_withSecret():
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
 * However, the quality of the secret impacts the dispersion of the hash algorithm.
 * Therefore, the secret _must_ look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever in doubt about the "randomness" of the blob of bytes,
 * consider employing "XXH3_generateSecret()" instead (see below).
 * It will generate a proper high entropy secret derived from the blob of bytes.
 * Another advantage of using XXH3_generateSecret() is that
 * it guarantees that all bits within the initial blob of bytes
 * will impact every bit of the output.
 * This is not necessarily the case when using the blob of bytes directly
 * because, when hashing _small_ inputs, only a portion of the secret is employed.
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
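
/*
 * Sketch of hashing with a caller-provided secret. The helper is a
 * hypothetical example; a real secret should come from a high-entropy source
 * (or from XXH3_generateSecret(), see the experimental API below).
 *
 *     #include "xxhash.h"
 *
 *     XXH64_hash_t hashWithSecret(const void* data, size_t len,
 *                                 const unsigned char secret[XXH3_SECRET_SIZE_MIN])
 *     {
 *         // secretSize must be >= XXH3_SECRET_SIZE_MIN (136 bytes)
 *         return XXH3_64bits_withSecret(data, len, secret, XXH3_SECRET_SIZE_MIN);
 *     }
 */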


/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 */

/*!
 * @brief The state struct for the XXH3 streaming API.
 *
 * @see XXH3_state_s for details.
 */
typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);

/*
 * XXH3_64bits_reset():
 * Initialize with default parameters.
 * digest will be equivalent to `XXH3_64bits()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
/*
 * XXH3_64bits_reset_withSeed():
 * Generate a custom secret from `seed`, and store it into `statePtr`.
 * digest will be equivalent to `XXH3_64bits_withSeed()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
/*
 * XXH3_64bits_reset_withSecret():
 * `secret` is referenced, it _must outlive_ the hash streaming session.
 * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
 * and the quality of produced hash values depends on secret's entropy
 * (secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH3_64bits_digest (const XXH3_state_t* statePtr);
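
/*
 * Minimal streaming sketch: the same create/reset/update/digest pattern as
 * XXH32, except that the default reset takes no seed argument. The helper
 * name is a hypothetical example.
 *
 *     #include "xxhash.h"
 *
 *     XXH64_hash_t hashTwoParts(const void* a, size_t lenA,
 *                               const void* b, size_t lenB)
 *     {
 *         XXH64_hash_t hash = 0;
 *         XXH3_state_t* state = XXH3_createState();
 *         if (state == NULL) return 0;                  // allocation failure
 *         if (XXH3_64bits_reset(state) == XXH_OK
 *          && XXH3_64bits_update(state, a, lenA) == XXH_OK
 *          && XXH3_64bits_update(state, b, lenB) == XXH_OK)
 *             hash = XXH3_64bits_digest(state);
 *         XXH3_freeState(state);
 *         return hash;
 *     }
 */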

/* note : canonical representation of XXH3 is the same as XXH64
 * since they both produce XXH64_hash_t values */


/*-**********************************************************************
 *  XXH3 128-bit variant
 ************************************************************************/

/*!
 * @brief The return value from 128-bit hashes.
 *
 * Stored in little endian order, although the fields themselves are in native
 * endianness.
 */
typedef struct {
    XXH64_hash_t low64;   /*!< `value & 0xFFFFFFFFFFFFFFFF` */
    XXH64_hash_t high64;  /*!< `value >> 64` */
} XXH128_hash_t;

XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);

/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 *
 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
 * Use already declared XXH3_createState() and XXH3_freeState().
 *
 * All reset and streaming functions have same meaning as their 64-bit counterpart.
 */

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);

/* The following helper functions make it possible to compare XXH128_hash_t values.
 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
 * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */

/*!
 * XXH128_isEqual():
 * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
 */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);

/*!
 * XXH128_cmp():
 *
 * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
 *
 * return: >0 if *h128_1  > *h128_2
 *         =0 if *h128_1 == *h128_2
 *         <0 if *h128_1  < *h128_2
 */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);
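
/*
 * Example: sorting an array of XXH128_hash_t with the comparator above
 * (a sketch; qsort() comes from <stdlib.h>, the helper name is illustrative):
 *
 *     #include <stdlib.h>
 *     #include "xxhash.h"
 *
 *     void sortHashes(XXH128_hash_t* hashes, size_t count)
 *     {
 *         // XXH128_cmp() matches the qsort()/bsearch() comparator contract.
 *         qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
 *     }
 */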


/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);


#endif  /* XXH_NO_LONG_LONG */

/*!
 * @}
 */
#endif /* XXHASH_H_5627135585666179 */



#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
/* ****************************************************************************
 * This section contains declarations which are not guaranteed to remain stable.
 * They may change in future versions, becoming incompatible with a different
 * version of the library.
 * These declarations should only be used with static linking.
 * Never use them in association with dynamic linking!
 ***************************************************************************** */

/*
 * These definitions are only present to allow static allocation
 * of XXH states, on stack or in a struct, for example.
 * Never **ever** access their members directly.
 */

/*!
 * @internal
 * @brief Structure for XXH32 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH32_state_t.
 * Do not access the members of this struct directly.
 * @see XXH64_state_s, XXH3_state_s
 */
struct XXH32_state_s {
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
   XXH32_hash_t v[4];         /*!< Accumulator lanes */
   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
   XXH32_hash_t reserved;     /*!< Reserved field. Do not read nor write to it. */
};   /* typedef'd to XXH32_state_t */


#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */

/*!
 * @internal
 * @brief Structure for XXH64 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH64_state_t.
 * Do not access the members of this struct directly.
 * @see XXH32_state_s, XXH3_state_s
 */
struct XXH64_state_s {
   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
   XXH64_hash_t v[4];         /*!< Accumulator lanes */
   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyway */
   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it. */
};   /* typedef'd to XXH64_state_t */

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
#  include <stdalign.h>
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
/* In C++ alignas() is a keyword */
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__GNUC__)
#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
#  define XXH_ALIGN(n)      __declspec(align(n))
#else
#  define XXH_ALIGN(n)   /* disabled */
#endif

/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
    && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
    && defined(__GNUC__)
#  define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
#  define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif

/*!
 * @brief The size of the internal XXH3 buffer.
 *
 * This is the optimal update size for incremental hashing.
 *
 * @see XXH3_64b_update(), XXH3_128b_update().
 */
#define XXH3_INTERNALBUFFER_SIZE 256

/*!
 * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
 *
 * This is the size used in @ref XXH3_kSecret and the seeded functions.
 *
 * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
 */
#define XXH3_SECRET_DEFAULT_SIZE 192

/*!
 * @internal
 * @brief Structure for XXH3 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
 * Otherwise it is an opaque type.
 * Never use this definition in combination with a dynamic library.
 * This allows fields to safely be changed in the future.
 *
 * @note ** This structure has a strict alignment requirement of 64 bytes!! **
 * Do not allocate this with `malloc()` or `new`,
 * it will not be sufficiently aligned.
 * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
 *
 * Typedef'd to @ref XXH3_state_t.
 * Never access the members of this struct directly.
 *
 * @see XXH3_INITSTATE() for stack initialization.
 * @see XXH3_createState(), XXH3_freeState().
 * @see XXH32_state_s, XXH64_state_s
 */
struct XXH3_state_s {
   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
       /*!< The 8 accumulators. Similar to `v` in @ref XXH32_state_s and @ref XXH64_state_s */
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
       /*!< Used to store a custom secret generated from a seed. */
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
       /*!< The internal buffer. @see XXH32_state_s::mem32 */
   XXH32_hash_t bufferedSize;
       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
   XXH32_hash_t useSeed;
       /*!< Reserved field. Needed for padding on 64-bit. */
   size_t nbStripesSoFar;
       /*!< Number of stripes processed. */
   XXH64_hash_t totalLen;
       /*!< Total length hashed. 64-bit even on 32-bit targets. */
   size_t nbStripesPerBlock;
       /*!< Number of stripes per block. */
   size_t secretLimit;
       /*!< Size of @ref customSecret or @ref extSecret */
   XXH64_hash_t seed;
       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
   XXH64_hash_t reserved64;
       /*!< Reserved field. */
   const unsigned char* extSecret;
       /*!< Reference to an external secret for the _withSecret variants, NULL
        *   for other variants. */
   /* note: there may be some padding at the end due to alignment on 64 bytes */
}; /* typedef'd to XXH3_state_t */

#undef XXH_ALIGN_MEMBER

/*!
 * @brief Initializes a stack-allocated `XXH3_state_s`.
 *
 * When the @ref XXH3_state_t structure is merely emplaced on stack,
 * it should be initialized with XXH3_INITSTATE() or a memset()
 * in case its first reset uses XXH3_NNbits_reset_withSeed().
 * This init can be omitted if the first reset uses default or _withSecret mode.
 * This operation isn't necessary when the state is created with XXH3_createState().
 * Note that this doesn't prepare the state for a streaming operation,
 * it's still necessary to use XXH3_NNbits_reset*() afterwards.
 */
#define XXH3_INITSTATE(XXH3_state_ptr)   { (XXH3_state_ptr)->seed = 0; }
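
/*
 * Stack allocation sketch (assumes XXH_STATIC_LINKING_ONLY was defined before
 * including this header, so the struct definition is visible; the helper name
 * is a hypothetical example). The 64-byte alignment comes from the struct
 * definition itself.
 *
 *     #define XXH_STATIC_LINKING_ONLY
 *     #include "xxhash.h"
 *
 *     XXH64_hash_t hashOnStack(const void* data, size_t len, XXH64_hash_t seed)
 *     {
 *         XXH3_state_t state;
 *         XXH3_INITSTATE(&state);           // required before a _withSeed reset
 *         XXH3_64bits_reset_withSeed(&state, seed);
 *         XXH3_64bits_update(&state, data, len);
 *         return XXH3_64bits_digest(&state); // no freeState: state is on stack
 *     }
 */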


/* XXH128() :
 * simple alias to pre-selected XXH3_128bits variant
 */
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);


/* ===   Experimental API   === */
/* Symbols defined below must be considered tied to a specific library version. */

/*
 * XXH3_generateSecret():
 *
 * Derive a high-entropy secret from any user-defined content, named customSeed.
 * The generated secret can be used in combination with `*_withSecret()` functions.
 * The `_withSecret()` variants are useful to provide a higher level of protection
 * than a 64-bit seed, as it becomes much more difficult for an external actor to
 * guess how to impact the calculation logic.
 *
 * The function accepts as input a custom seed of any length and any content,
 * and derives from it a high-entropy secret of length @secretSize
 * into an already allocated buffer @secretBuffer.
 * @secretSize must be >= XXH3_SECRET_SIZE_MIN
 *
 * The generated secret can then be used with any `*_withSecret()` variant.
 * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
 * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
 * are part of this list. They all accept a `secret` parameter
 * which must be large enough for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
 * _and_ feature very high entropy (consist of random-looking bytes).
 * These conditions can be a high bar to meet, so
 * XXH3_generateSecret() can be employed to ensure proper quality.
 *
 * customSeed can be anything. It can have any size, even small ones,
 * and its content can be anything, even "poor entropy" sources such as a bunch of zeroes.
 * The resulting `secret` will nonetheless provide all required qualities.
 *
 * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize);
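
/*
 * Sketch: deriving a proper secret from arbitrary (possibly low-entropy)
 * application data, then using it with a _withSecret() variant. The helper
 * name and buffer size choice are illustrative.
 *
 *     #define XXH_STATIC_LINKING_ONLY
 *     #include "xxhash.h"
 *
 *     XXH64_hash_t hashWithDerivedSecret(const void* data, size_t len,
 *                                        const void* seedBlob, size_t seedBlobSize)
 *     {
 *         unsigned char secret[XXH3_SECRET_SIZE_MIN];
 *         // Even a "poor entropy" seedBlob yields a high-entropy secret.
 *         if (XXH3_generateSecret(secret, sizeof(secret),
 *                                 seedBlob, seedBlobSize) != XXH_OK)
 *             return 0;
 *         return XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
 *     }
 */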


/*
 * XXH3_generateSecret_fromSeed():
 *
 * Generate the same secret as the _withSeed() variants.
 *
 * The resulting secret has a length of XXH3_SECRET_DEFAULT_SIZE (necessarily).
 * @secretBuffer must be already allocated, of size at least XXH3_SECRET_DEFAULT_SIZE bytes.
 *
 * The generated secret can be used in combination with
 * `*_withSecret()` and `_withSecretandSeed()` variants.
 * This generator is notably useful in combination with `_withSecretandSeed()`,
 * as a way to emulate a faster `_withSeed()` variant.
 */
XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed);

/*
 * *_withSecretandSeed() :
 * These variants generate hash values using either
 * @seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
 * or @secret for "large" keys (>= XXH3_MIDSIZE_MAX).
 *
 * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
 * `_withSeed()` has to generate the secret on the fly for "large" keys.
 * This is fast, but the cost can be perceptible for "not so large" keys (< 1 KB).
 * `_withSecret()` has to generate the masks on the fly for "small" keys,
 * which requires more instructions than _withSeed() variants.
 * Therefore, the _withSecretandSeed variant combines the best of both worlds.
 *
 * When @secret has been generated by XXH3_generateSecret_fromSeed(),
 * this variant produces *exactly* the same results as the `_withSeed()` variant,
 * hence offering only a pure speed benefit on "large" input,
 * by skipping the need to regenerate the secret for every large input.
 *
 * Another usage scenario is to hash the secret to a 64-bit hash value,
 * for example with XXH3_64bits(), which then becomes the seed,
 * and then employ both the seed and the secret in _withSecretandSeed().
 * On top of speed, an added benefit is that each bit in the secret
 * has a 50% chance to flip each bit in the output,
 * via its impact on the seed.
 * This is not guaranteed when using the secret directly in "small data" scenarios,
 * because only portions of the secret are employed for small data.
 */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* data, size_t len,
                              const void* secret, size_t secretSize,
                              XXH64_hash_t seed);

XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* data, size_t len,
                               const void* secret, size_t secretSize,
                               XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                    const void* secret, size_t secretSize,
                                    XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                     const void* secret, size_t secretSize,
                                     XXH64_hash_t seed64);
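
/*
 * Sketch of the "emulate a faster _withSeed()" scenario described above:
 * expand the seed once with XXH3_generateSecret_fromSeed(), then reuse the
 * expanded secret together with the same seed on every call. Names are
 * illustrative examples.
 *
 *     #define XXH_STATIC_LINKING_ONLY
 *     #include "xxhash.h"
 *
 *     static unsigned char g_secret[XXH3_SECRET_DEFAULT_SIZE];
 *     static XXH64_hash_t  g_seed;
 *
 *     void initHasher(XXH64_hash_t seed)
 *     {
 *         g_seed = seed;
 *         XXH3_generateSecret_fromSeed(g_secret, seed);   // done once
 *     }
 *
 *     XXH64_hash_t hashLarge(const void* data, size_t len)
 *     {
 *         // Produces the same value as XXH3_64bits_withSeed(data, len, g_seed),
 *         // without regenerating the secret for every large input.
 *         return XXH3_64bits_withSecretandSeed(data, len,
 *                                              g_secret, sizeof(g_secret), g_seed);
 *     }
 */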


#endif  /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif

#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */


/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */


/*-**********************************************************************
 * xxHash implementation
 *-**********************************************************************
 * xxHash's implementation used to be hosted inside xxhash.c.
 *
 * However, inlining requires implementation to be visible to the compiler,
 * hence be included alongside the header.
 * Previously, implementation was hosted inside xxhash.c,
 * which was then #included when inlining was activated.
 * This construction created issues with a few build and install systems,
 * as it required xxhash.c to be stored in /include directory.
 *
 * xxHash implementation is now directly integrated within xxhash.h.
 * As a consequence, xxhash.c is no longer needed in /include.
 *
 * xxhash.c is still available and is still useful.
 * In a "normal" setup, when xxhash is not inlined,
 * xxhash.h only exposes the prototypes and public symbols,
 * while xxhash.c can be built into an object file xxhash.o
 * which can then be linked into the final binary.
 ************************************************************************/

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387

/* *************************************
 *  Tuning parameters
 ***************************************/

/*!
 * @defgroup tuning Tuning parameters
 * @{
 *
 * Various macros to control xxHash's behavior.
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Define this to disable 64-bit code.
 *
 * Useful if only using the @ref xxh32_family and you have a strict C90 compiler.
 */
#  define XXH_NO_LONG_LONG
#  undef XXH_NO_LONG_LONG /* don't actually */
/*!
 * @brief Controls how unaligned memory is accessed.
 *
 * By default, access to unaligned memory is controlled by `memcpy()`, which is
 * safe and portable.
 *
 * Unfortunately, on some target/compiler combinations, the generated assembly
 * is sub-optimal.
 *
 * The below switch allows selection of a different access method
 * in the search for improved performance.
 *
 * @par Possible options:
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
 *   @par
 *     Use `memcpy()`. Safe and portable. Note that most modern compilers will
 *     eliminate the function call and treat it as an unaligned access.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))`
 *   @par
 *     Depends on compiler extensions and is therefore not portable.
 *     This method is safe _if_ your compiler supports it,
 *     and *generally* as fast or faster than `memcpy`.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
 *   @par
 *     Casts directly and dereferences. This method doesn't depend on the
 *     compiler, but it violates the C standard as it directly dereferences an
 *     unaligned pointer. It can generate buggy code on targets which do not
 *     support unaligned memory accesses, but in some circumstances, it's the
 *     only known way to get the most performance.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
 *   @par
 *     Also portable. This can generate the best code on old compilers which don't
 *     inline small `memcpy()` calls, and it might also be faster on big-endian
 *     systems which lack a native byteswap instruction. However, some compilers
 *     will emit literal byteshifts even if the target supports unaligned access.
 *  .
 *
 * @warning
 *   Methods 1 and 2 rely on implementation-defined behavior. Use these with
 *   care, as what works on one compiler/platform/optimization level may cause
 *   another to read garbage data or even crash.
 *
 * See http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
 *
 * Prefer these methods in priority order (0 > 3 > 1 > 2)
 */
#  define XXH_FORCE_MEMORY_ACCESS 0

/*!
 * @def XXH_FORCE_ALIGN_CHECK
 * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
 * and XXH64() only).
 *
 * This is an important performance trick for architectures without decent
 * unaligned memory access performance.
 *
 * It checks for input alignment, and when conditions are met, uses a "fast
 * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
 * faster_ read speed.
 *
 * The check costs one initial branch per hash, which is generally negligible,
 * but not zero.
 *
 * Moreover, it's not useful to generate an additional code path if memory
 * access uses the same instruction for both aligned and unaligned
 * addresses (e.g. x86 and aarch64).
 *
 * In these cases, the alignment check can be removed by setting this macro to 0.
 * Then the code will always use unaligned memory access.
 * Align check is automatically disabled on x86, x64 & arm64,
 * which are platforms known to offer good unaligned memory access performance.
 *
 * This option does not affect XXH3 (only XXH32 and XXH64).
 */
#  define XXH_FORCE_ALIGN_CHECK 0

/*!
 * @def XXH_NO_INLINE_HINTS
 * @brief When non-zero, sets all functions to `static`.
 *
 * By default, xxHash tries to force the compiler to inline almost all internal
 * functions.
 *
 * This can usually improve performance due to reduced jumping and improved
 * constant folding, but significantly increases the size of the binary which
 * might not be favorable.
 *
 * Additionally, sometimes the forced inlining can be detrimental to performance,
 * depending on the architecture.
 *
 * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
 * compiler full control on whether to inline or not.
 *
 * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using
 * -fno-inline with GCC or Clang, this will automatically be defined.
 */
#  define XXH_NO_INLINE_HINTS 0

/*!
 * @def XXH32_ENDJMP
 * @brief Whether to use a jump for `XXH32_finalize`.
 *
 * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
 * This is generally preferable for performance,
 * but depending on the exact architecture, a jmp may be preferable.
 *
 * This setting is only likely to make a difference for very small inputs.
 */
#  define XXH32_ENDJMP 0

/*!
 * @internal
 * @brief Redefines old internal names.
 *
 * For compatibility with code that uses xxHash's internals before the names
 * were changed to improve namespacing. There is no other reason to use this.
 */
#  define XXH_OLD_NAMES
#  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
#endif /* XXH_DOXYGEN */
/*!
 * @}
 */
1403
1404 #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
1405 /* prefer __packed__ structures (method 1) for gcc on armv7+ and mips */
1406 # if !defined(__clang__) && \
1407 ( \
1408 (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
1409 ( \
1410 defined(__GNUC__) && ( \
1411 (defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \
1412 ( \
1413 defined(__mips__) && \
1414 (__mips <= 5 || __mips_isa_rev < 6) && \
1415 (!defined(__mips16) || defined(__mips_mips16e2)) \
1416 ) \
1417 ) \
1418 ) \
1419 )
1420 # define XXH_FORCE_MEMORY_ACCESS 1
1421 # endif
1422 #endif
1423
1424 #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
1425 # if defined(__i386) || defined(__x86_64__) || defined(__aarch64__) \
1426 || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) /* visual */
1427 # define XXH_FORCE_ALIGN_CHECK 0
1428 # else
1429 # define XXH_FORCE_ALIGN_CHECK 1
1430 # endif
1431 #endif
1432
1433 #ifndef XXH_NO_INLINE_HINTS
1434 # if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
1435 || defined(__NO_INLINE__) /* -O0, -fno-inline */
1436 # define XXH_NO_INLINE_HINTS 1
1437 # else
1438 # define XXH_NO_INLINE_HINTS 0
1439 # endif
1440 #endif
1441
1442 #ifndef XXH32_ENDJMP
1443 /* generally preferable for performance */
1444 # define XXH32_ENDJMP 0
1445 #endif
1446
1447 /*!
1448 * @defgroup impl Implementation
1449 * @{
1450 */
1451
1452
1453 /* *************************************
1454 * Includes & Memory related functions
1455 ***************************************/
1456 /*
1457 * Modify the local functions below should you wish to use
1458 * different memory routines for malloc() and free()
1459 */
1460 #include <stdlib.h>
1461
1462 /*!
1463 * @internal
1464 * @brief Modify this function to use a different routine than malloc().
1465 */
XXH_malloc(size_t s)1466 static void* XXH_malloc(size_t s) { return malloc(s); }
1467
1468 /*!
1469 * @internal
1470 * @brief Modify this function to use a different routine than free().
1471 */
XXH_free(void * p)1472 static void XXH_free(void* p) { free(p); }
1473
1474 #include <string.h>
1475
1476 /*!
1477 * @internal
1478 * @brief Modify this function to use a different routine than memcpy().
1479 */
XXH_memcpy(void * dest,const void * src,size_t size)1480 static void* XXH_memcpy(void* dest, const void* src, size_t size)
1481 {
1482 return memcpy(dest,src,size);
1483 }
1484
1485 #include <limits.h> /* ULLONG_MAX */
1486
1487
1488 /* *************************************
1489 * Compiler Specific Options
1490 ***************************************/
1491 #ifdef _MSC_VER /* Visual Studio warning fix */
1492 # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
1493 #endif
1494
1495 #if XXH_NO_INLINE_HINTS /* disable inlining hints */
1496 # if defined(__GNUC__) || defined(__clang__)
1497 # define XXH_FORCE_INLINE static __attribute__((unused))
1498 # else
1499 # define XXH_FORCE_INLINE static
1500 # endif
1501 # define XXH_NO_INLINE static
1502 /* enable inlining hints */
1503 #elif defined(__GNUC__) || defined(__clang__)
1504 # define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
1505 # define XXH_NO_INLINE static __attribute__((noinline))
1506 #elif defined(_MSC_VER) /* Visual Studio */
1507 # define XXH_FORCE_INLINE static __forceinline
1508 # define XXH_NO_INLINE static __declspec(noinline)
1509 #elif defined (__cplusplus) \
1510 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
1511 # define XXH_FORCE_INLINE static inline
1512 # define XXH_NO_INLINE static
1513 #else
1514 # define XXH_FORCE_INLINE static
1515 # define XXH_NO_INLINE static
1516 #endif
1517
1518
1519
1520 /* *************************************
1521 * Debug
1522 ***************************************/
1523 /*!
1524 * @ingroup tuning
1525 * @def XXH_DEBUGLEVEL
1526 * @brief Sets the debugging level.
1527 *
1528 * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
1529 * compiler's command line options. The value must be a number.
1530 */
1531 #ifndef XXH_DEBUGLEVEL
1532 # ifdef DEBUGLEVEL /* backwards compat */
1533 # define XXH_DEBUGLEVEL DEBUGLEVEL
1534 # else
1535 # define XXH_DEBUGLEVEL 0
1536 # endif
1537 #endif
1538
1539 #if (XXH_DEBUGLEVEL>=1)
1540 # include <assert.h> /* note: can still be disabled with NDEBUG */
1541 # define XXH_ASSERT(c) assert(c)
1542 #else
1543 # define XXH_ASSERT(c) ((void)0)
1544 #endif
1545
1546 /* note: use after variable declarations */
1547 #ifndef XXH_STATIC_ASSERT
1548 # if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */
1549 # include <assert.h>
1550 # define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
1551 # elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */
1552 # define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
1553 # else
1554 # define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
1555 # endif
1556 # define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
1557 #endif
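/*
 * Example (illustrative, not part of the library): because the pre-C11
 * fallback declares a struct, XXH_STATIC_ASSERT must appear after variable
 * declarations inside a block, as noted above:
 *
 *     void check_types(void)    (hypothetical caller)
 *     {
 *         int dummy = 0;
 *         XXH_STATIC_ASSERT(sizeof(XXH32_hash_t) == 4);
 *         (void)dummy;
 *     }
 */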

/*!
 * @internal
 * @def XXH_COMPILER_GUARD(var)
 * @brief Used to prevent unwanted optimizations for @p var.
 *
 * It uses an empty GCC inline assembly statement with a register constraint
 * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
 * on x86) and marks it as modified.
 *
 * This is used in a few places to avoid unwanted autovectorization (e.g.
 * XXH32_round()). All vectorization we want is explicit via intrinsics,
 * and _usually_ isn't wanted elsewhere.
 *
 * We also use it to prevent unwanted constant folding for AArch64 in
 * XXH3_initCustomSecret_scalar().
 */
#if defined(__GNUC__) || defined(__clang__)
#  define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
#else
#  define XXH_COMPILER_GUARD(var) ((void)0)
#endif
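/*
 * Illustrative sketch (not part of the library): placing the guard between
 * dependent scalar operations pins the value in a general purpose register,
 * which is what stops the autovectorizer from fusing a loop into SIMD.
 * `p` and `seed` stand for arbitrary input here:
 *
 *     xxh_u32 acc = seed;
 *     acc += XXH_readLE32(p) * XXH_PRIME32_2;
 *     XXH_COMPILER_GUARD(acc);   (opaque to the optimizer, free at runtime)
 *     acc  = XXH_rotl32(acc, 13);
 */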

/* *************************************
 * Basic Types
 ***************************************/
#if !defined (__VMS) \
 && (defined (__cplusplus) \
 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
  typedef uint8_t xxh_u8;
#else
  typedef unsigned char xxh_u8;
#endif
typedef XXH32_hash_t xxh_u32;

#ifdef XXH_OLD_NAMES
#  define BYTE xxh_u8
#  define U8   xxh_u8
#  define U32  xxh_u32
#endif

/* ***   Memory access   *** */

/*!
 * @internal
 * @fn xxh_u32 XXH_read32(const void* ptr)
 * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit native endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readLE32(const void* ptr)
 * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit little endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readBE32(const void* ptr)
 * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit big endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
 * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
 * always @ref XXH_alignment::XXH_unaligned.
 *
 * @param ptr The pointer to read from.
 * @param align Whether @p ptr is aligned.
 * @pre
 *   If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
 *   aligned.
 * @return The 32-bit little endian integer from the bytes at @p ptr.
 */

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE32 and XXH_readBE32.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/*
 * Force direct memory access. Only works on CPUs which support unaligned memory
 * access in hardware.
 */
static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/*
 * __pack instructions are safer but compiler specific, hence potentially
 * problematic for some compilers.
 *
 * Currently only defined for GCC and ICC.
 */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
#endif
static xxh_u32 XXH_read32(const void* ptr)
{
    typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign;
    return ((const xxh_unalign*)ptr)->u32;
}

#else

/*
 * Portable and safe solution. Generally efficient.
 * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
 */
static xxh_u32 XXH_read32(const void* memPtr)
{
    xxh_u32 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif /* XXH_FORCE_MEMORY_ACCESS */


/* ***   Endianness   *** */

/*!
 * @ingroup tuning
 * @def XXH_CPU_LITTLE_ENDIAN
 * @brief Whether the target is little endian.
 *
 * Defined to 1 if the target is little endian, or 0 if it is big endian.
 * It can be defined externally, for example on the compiler command line.
 *
 * If it is not defined,
 * a runtime check (which is usually constant folded) is used instead.
 *
 * @note
 * This is not necessarily defined to an integer constant.
 *
 * @see XXH_isLittleEndian() for the runtime check.
 */
#ifndef XXH_CPU_LITTLE_ENDIAN
/*
 * Try to detect endianness automatically, to avoid the nonstandard behavior
 * in `XXH_isLittleEndian()`.
 */
#  if defined(_WIN32) /* Windows is always little endian */ \
   || defined(__LITTLE_ENDIAN__) \
   || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 1
#  elif defined(__BIG_ENDIAN__) \
   || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 0
#  else
/*!
 * @internal
 * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
 *
 * Most compilers will constant fold this.
 */
static int XXH_isLittleEndian(void)
{
    /*
     * Portable and well-defined behavior.
     * Don't use static: it is detrimental to performance.
     */
    const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
    return one.c[0];
}
#    define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
#  endif
#endif
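/*
 * For example, a build system that already knows the target byte order can
 * bypass the detection above entirely (illustrative flag; any equivalent
 * predefinition works):
 *
 *     cc -DXXH_CPU_LITTLE_ENDIAN=1 -c xxhash.c
 */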


/* ****************************************
 * Compiler-specific Functions and Macros
 ******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifdef __has_builtin
#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
#else
#  define XXH_HAS_BUILTIN(x) 0
#endif

/*!
 * @internal
 * @def XXH_rotl32(x,r)
 * @brief 32-bit rotate left.
 *
 * @param x The 32-bit integer to be rotated.
 * @param r The number of bits to rotate.
 * @pre
 *   @p r > 0 && @p r < 32
 * @note
 *   @p x and @p r may be evaluated multiple times.
 * @return The rotated result.
 */
#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
#  define XXH_rotl32 __builtin_rotateleft32
#  define XXH_rotl64 __builtin_rotateleft64
/* Note: although _rotl exists for MinGW (GCC under Windows), performance seems poor */
#elif defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif
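/*
 * Worked example: XXH_rotl32(0x80000001, 1) == 0x00000003. The top bit wraps
 * around to bit 0 while every other bit shifts left by one.
 */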

/*!
 * @internal
 * @fn xxh_u32 XXH_swap32(xxh_u32 x)
 * @brief A 32-bit byteswap.
 *
 * @param x The 32-bit integer to byteswap.
 * @return @p x, byteswapped.
 */
#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
static xxh_u32 XXH_swap32 (xxh_u32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
#endif
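/*
 * Worked example: XXH_swap32(0x12345678) == 0x78563412. The byte order is
 * fully reversed, converting between little and big endian representations.
 */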


/* ***************************
 * Memory reads
 *****************************/

/*!
 * @internal
 * @brief Enum to indicate whether a pointer is aligned.
 */
typedef enum {
    XXH_aligned,  /*!< Aligned */
    XXH_unaligned /*!< Possibly unaligned */
} XXH_alignment;

/*
 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
 *
 * This is ideal for older compilers which don't inline memcpy.
 */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u32)bytePtr[1] << 8)
         | ((xxh_u32)bytePtr[2] << 16)
         | ((xxh_u32)bytePtr[3] << 24);
}

XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[3]
         | ((xxh_u32)bytePtr[2] << 8)
         | ((xxh_u32)bytePtr[1] << 16)
         | ((xxh_u32)bytePtr[0] << 24);
}

#else
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}

static xxh_u32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned) {
        return XXH_readLE32(ptr);
    } else {
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
    }
}


/* *************************************
 * Misc
 ***************************************/
/*! @ingroup public */
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* *******************************************************************
 * 32-bit hash functions
 *********************************************************************/
/*!
 * @}
 * @defgroup xxh32_impl XXH32 implementation
 * @ingroup impl
 * @{
 */
/* #define instead of static const, to be used as initializers */
#define XXH_PRIME32_1  0x9E3779B1U  /*!< 0b10011110001101110111100110110001 */
#define XXH_PRIME32_2  0x85EBCA77U  /*!< 0b10000101111010111100101001110111 */
#define XXH_PRIME32_3  0xC2B2AE3DU  /*!< 0b11000010101100101010111000111101 */
#define XXH_PRIME32_4  0x27D4EB2FU  /*!< 0b00100111110101001110101100101111 */
#define XXH_PRIME32_5  0x165667B1U  /*!< 0b00010110010101100110011110110001 */

#ifdef XXH_OLD_NAMES
#  define PRIME32_1 XXH_PRIME32_1
#  define PRIME32_2 XXH_PRIME32_2
#  define PRIME32_3 XXH_PRIME32_3
#  define PRIME32_4 XXH_PRIME32_4
#  define PRIME32_5 XXH_PRIME32_5
#endif

/*!
 * @internal
 * @brief Normal stripe processing routine.
 *
 * This shuffles the bits so that any bit from @p input impacts several bits in
 * @p acc.
 *
 * @param acc The accumulator lane.
 * @param input The stripe of input to mix.
 * @return The mixed accumulator lane.
 */
static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
{
    acc += input * XXH_PRIME32_2;
    acc  = XXH_rotl32(acc, 13);
    acc *= XXH_PRIME32_1;
#if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
    /*
     * UGLY HACK:
     * A compiler fence is the only thing that prevents GCC and Clang from
     * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
     * reason) without globally disabling SSE4.1.
     *
     * The reason we want to avoid vectorization is because despite working on
     * 4 integers at a time, there are multiple factors slowing XXH32 down on
     * SSE4:
     * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
     *   newer chips!) making it slightly slower to multiply four integers at
     *   once compared to four integers independently. Even on Sandy/Ivy Bridge,
     *   where pmulld was fastest, it is still not worth going into SSE just to
     *   multiply unless doing a long operation.
     *
     * - Four instructions are required to rotate,
     *      movdqa  tmp, v    // not required with VEX encoding
     *      pslld   tmp, 13   // tmp <<= 13
     *      psrld   v, 19     // x >>= 19
     *      por     v, tmp    // x |= tmp
     *   compared to one for scalar:
     *      roll    v, 13     // reliably fast across the board
     *      shldl   v, v, 13  // Sandy Bridge and later prefer this for some reason
     *
     * - Instruction level parallelism is actually more beneficial here because
     *   the SIMD actually serializes this operation: While v1 is rotating, v2
     *   can load data, while v3 can multiply. SSE forces them to operate
     *   together.
     *
     * This is also enabled on AArch64, as Clang autovectorizes it incorrectly
     * and it is pointless to write a NEON implementation that is basically the
     * same speed as scalar for XXH32.
     */
    XXH_COMPILER_GUARD(acc);
#endif
    return acc;
}

/*!
 * @internal
 * @brief Mixes all bits to finalize the hash.
 *
 * The final mix ensures that all input bits have a chance to impact any bit in
 * the output digest, resulting in an unbiased distribution.
 *
 * @param h32 The hash to avalanche.
 * @return The avalanched hash.
 */
static xxh_u32 XXH32_avalanche(xxh_u32 h32)
{
    h32 ^= h32 >> 15;
    h32 *= XXH_PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= XXH_PRIME32_3;
    h32 ^= h32 >> 16;
    return(h32);
}

#define XXH_get32bits(p) XXH_readLE32_align(p, align)

/*!
 * @internal
 * @brief Processes the last 0-15 bytes of @p ptr.
 *
 * There may be up to 15 bytes remaining to consume from the input.
 * This final stage will digest them to ensure that all input bytes are present
 * in the final mix.
 *
 * @param h32 The hash to finalize.
 * @param ptr The pointer to the remaining input.
 * @param len The remaining length, modulo 16.
 * @param align Whether @p ptr is aligned.
 * @return The finalized hash.
 */
static xxh_u32
XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
#define XXH_PROCESS1 do {                             \
    h32 += (*ptr++) * XXH_PRIME32_5;                  \
    h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1;        \
} while (0)

#define XXH_PROCESS4 do {                             \
    h32 += XXH_get32bits(ptr) * XXH_PRIME32_3;        \
    ptr += 4;                                         \
    h32  = XXH_rotl32(h32, 17) * XXH_PRIME32_4;       \
} while (0)

    if (ptr==NULL) XXH_ASSERT(len == 0);

    /* Compact rerolled version; generally faster */
    if (!XXH32_ENDJMP) {
        len &= 15;
        while (len >= 4) {
            XXH_PROCESS4;
            len -= 4;
        }
        while (len > 0) {
            XXH_PROCESS1;
            --len;
        }
        return XXH32_avalanche(h32);
    } else {
        switch(len&15) /* or switch(bEnd - p) */ {
            case 12: XXH_PROCESS4;
                     XXH_FALLTHROUGH;
            case 8:  XXH_PROCESS4;
                     XXH_FALLTHROUGH;
            case 4:  XXH_PROCESS4;
                     return XXH32_avalanche(h32);

            case 13: XXH_PROCESS4;
                     XXH_FALLTHROUGH;
            case 9:  XXH_PROCESS4;
                     XXH_FALLTHROUGH;
            case 5:  XXH_PROCESS4;
                     XXH_PROCESS1;
                     return XXH32_avalanche(h32);

            case 14: XXH_PROCESS4;
                     XXH_FALLTHROUGH;
            case 10: XXH_PROCESS4;
                     XXH_FALLTHROUGH;
            case 6:  XXH_PROCESS4;
                     XXH_PROCESS1;
                     XXH_PROCESS1;
                     return XXH32_avalanche(h32);

            case 15: XXH_PROCESS4;
                     XXH_FALLTHROUGH;
            case 11: XXH_PROCESS4;
                     XXH_FALLTHROUGH;
            case 7:  XXH_PROCESS4;
                     XXH_FALLTHROUGH;
            case 3:  XXH_PROCESS1;
                     XXH_FALLTHROUGH;
            case 2:  XXH_PROCESS1;
                     XXH_FALLTHROUGH;
            case 1:  XXH_PROCESS1;
                     XXH_FALLTHROUGH;
            case 0:  return XXH32_avalanche(h32);
        }
        XXH_ASSERT(0);
        return h32;   /* reaching this point is deemed impossible */
    }
}

#ifdef XXH_OLD_NAMES
#  define PROCESS1 XXH_PROCESS1
#  define PROCESS4 XXH_PROCESS4
#else
#  undef XXH_PROCESS1
#  undef XXH_PROCESS4
#endif

/*!
 * @internal
 * @brief The implementation for @ref XXH32().
 *
 * @param input , len , seed Directly passed from @ref XXH32().
 * @param align Whether @p input is aligned.
 * @return The calculated hash.
 */
XXH_FORCE_INLINE xxh_u32
XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
{
    xxh_u32 h32;

    if (input==NULL) XXH_ASSERT(len == 0);

    if (len>=16) {
        const xxh_u8* const bEnd = input + len;
        const xxh_u8* const limit = bEnd - 15;
        xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
        xxh_u32 v2 = seed + XXH_PRIME32_2;
        xxh_u32 v3 = seed + 0;
        xxh_u32 v4 = seed - XXH_PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
            v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
            v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
            v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
        } while (input < limit);

        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32 = seed + XXH_PRIME32_5;
    }

    h32 += (xxh_u32)len;

    return XXH32_finalize(h32, input, len&15, align);
}

/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, (const xxh_u8*)input, len);
    return XXH32_digest(&state);
#else
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-byte aligned, leverage the speed benefit */
            return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
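/*
 * Usage sketch (illustrative, not part of the library): hashing a buffer in
 * one shot with a seed of 0:
 *
 *     const char data[] = "hello";
 *     XXH32_hash_t h = XXH32(data, sizeof(data) - 1, 0);
 */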


/*******   Hash streaming   *******/
/*!
 * @ingroup xxh32_family
 */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

/*! @ingroup xxh32_family */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
{
    XXH_memcpy(dstState, srcState, sizeof(*dstState));
}

/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
{
    XXH_ASSERT(statePtr != NULL);
    memset(statePtr, 0, sizeof(*statePtr));
    statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
    statePtr->v[1] = seed + XXH_PRIME32_2;
    statePtr->v[2] = seed + 0;
    statePtr->v[3] = seed - XXH_PRIME32_1;
    return XXH_OK;
}


/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH_errorcode
XXH32_update(XXH32_state_t* state, const void* input, size_t len)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;

        state->total_len_32 += (XXH32_hash_t)len;
        state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));

        if (state->memsize + len < 16) {   /* fill in tmp buffer */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
            state->memsize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* some data left from previous update */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
            {   const xxh_u32* p32 = state->mem32;
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
            }
            p += 16-state->memsize;
            state->memsize = 0;
        }

        if (p <= bEnd-16) {
            const xxh_u8* const limit = bEnd - 16;

            do {
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
            } while (p<=limit);

        }

        if (p < bEnd) {
            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }

    return XXH_OK;
}


/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
{
    xxh_u32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v[0], 1)
            + XXH_rotl32(state->v[1], 7)
            + XXH_rotl32(state->v[2], 12)
            + XXH_rotl32(state->v[3], 18);
    } else {
        h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
    }

    h32 += state->total_len_32;

    return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
}
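/*
 * Usage sketch (illustrative, not part of the library): the streaming API
 * yields the same hash as the one-shot XXH32() regardless of how the input
 * is chunked. `buf1`/`len1` and `buf2`/`len2` stand for any caller-provided
 * byte ranges:
 *
 *     XXH32_state_t* st = XXH32_createState();
 *     XXH32_reset(st, 0);
 *     XXH32_update(st, buf1, len1);
 *     XXH32_update(st, buf2, len2);
 *     XXH32_hash_t h = XXH32_digest(st);
 *     XXH32_freeState(st);
 */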


/*******   Canonical representation   *******/

/*!
 * @ingroup xxh32_family
 * The default return values from XXH functions are unsigned 32 and 64 bit
 * integers.
 *
 * The canonical representation uses big endian convention, the same convention
 * as human-readable numbers (large digits first).
 *
 * This way, hash values can be written into a file or buffer, remaining
 * comparable across different systems.
 *
 * The following functions allow transformation of hash values to and from
 * their canonical format.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    XXH_memcpy(dst, &hash, sizeof(*dst));
}
/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}
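/*
 * Usage sketch (illustrative): serializing a hash in a portable byte order.
 * A canonical value written on a little endian machine reads back identically
 * on a big endian one. `h` is any hash from XXH32() or XXH32_digest(), and
 * `f` any open FILE*:
 *
 *     XXH32_canonical_t c;
 *     XXH32_canonicalFromHash(&c, h);
 *     fwrite(&c, sizeof(c), 1, f);                     (4 bytes, big endian)
 *     ...
 *     XXH32_hash_t h2 = XXH32_hashFromCanonical(&c);   (h2 == h)
 */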


#ifndef XXH_NO_LONG_LONG

/* *******************************************************************
 * 64-bit hash functions
 *********************************************************************/
/*!
 * @}
 * @ingroup impl
 * @{
 */
/*******   Memory access   *******/

typedef XXH64_hash_t xxh_u64;

#ifdef XXH_OLD_NAMES
#  define U64 xxh_u64
#endif

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE64 and XXH_readBE64.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static xxh_u64 XXH_read64(const void* memPtr)
{
    return *(const xxh_u64*) memPtr;
}

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/*
 * __pack instructions are safer, but compiler specific, hence potentially
 * problematic for some compilers.
 *
 * Currently only defined for GCC and ICC.
 */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
#endif
static xxh_u64 XXH_read64(const void* ptr)
{
    typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64;
    return ((const xxh_unalign64*)ptr)->u64;
}

#else

/*
 * Portable and safe solution. Generally efficient.
 * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
 */
static xxh_u64 XXH_read64(const void* memPtr)
{
    xxh_u64 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif /* XXH_FORCE_MEMORY_ACCESS */

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap64 _byteswap_uint64
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap64 __builtin_bswap64
#else
static xxh_u64 XXH_swap64(xxh_u64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x << 8)  & 0x000000ff00000000ULL) |
            ((x >> 8)  & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif


/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u64)bytePtr[1] << 8)
         | ((xxh_u64)bytePtr[2] << 16)
         | ((xxh_u64)bytePtr[3] << 24)
         | ((xxh_u64)bytePtr[4] << 32)
         | ((xxh_u64)bytePtr[5] << 40)
         | ((xxh_u64)bytePtr[6] << 48)
         | ((xxh_u64)bytePtr[7] << 56);
}

XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[7]
         | ((xxh_u64)bytePtr[6] << 8)
         | ((xxh_u64)bytePtr[5] << 16)
         | ((xxh_u64)bytePtr[4] << 24)
         | ((xxh_u64)bytePtr[3] << 32)
         | ((xxh_u64)bytePtr[2] << 40)
         | ((xxh_u64)bytePtr[1] << 48)
         | ((xxh_u64)bytePtr[0] << 56);
}

#else
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
}

static xxh_u64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u64
XXH_readLE64_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return XXH_readLE64(ptr);
    else
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
}


/*******   xxh64   *******/
/*!
 * @}
 * @defgroup xxh64_impl XXH64 implementation
 * @ingroup impl
 * @{
 */
/* #define rather than static const, to be used as initializers */
#define XXH_PRIME64_1  0x9E3779B185EBCA87ULL  /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
#define XXH_PRIME64_2  0xC2B2AE3D27D4EB4FULL  /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
#define XXH_PRIME64_3  0x165667B19E3779F9ULL  /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
#define XXH_PRIME64_4  0x85EBCA77C2B2AE63ULL  /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
#define XXH_PRIME64_5  0x27D4EB2F165667C5ULL  /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */

#ifdef XXH_OLD_NAMES
#  define PRIME64_1 XXH_PRIME64_1
#  define PRIME64_2 XXH_PRIME64_2
#  define PRIME64_3 XXH_PRIME64_3
#  define PRIME64_4 XXH_PRIME64_4
#  define PRIME64_5 XXH_PRIME64_5
#endif

static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
{
    acc += input * XXH_PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= XXH_PRIME64_1;
    return acc;
}

static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
    return acc;
}

static xxh_u64 XXH64_avalanche(xxh_u64 h64)
{
    h64 ^= h64 >> 33;
    h64 *= XXH_PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= XXH_PRIME64_3;
    h64 ^= h64 >> 32;
    return h64;
}


#define XXH_get64bits(p) XXH_readLE64_align(p, align)

static xxh_u64
XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
    if (ptr==NULL) XXH_ASSERT(len == 0);
    len &= 31;
    while (len >= 8) {
        xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
        ptr += 8;
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
        len -= 8;
    }
    if (len >= 4) {
        h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
        ptr += 4;
        h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
        len -= 4;
    }
    while (len > 0) {
        h64 ^= (*ptr++) * XXH_PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;
        --len;
    }
    return XXH64_avalanche(h64);
}

#ifdef XXH_OLD_NAMES
#  define PROCESS1_64 XXH_PROCESS1_64
#  define PROCESS4_64 XXH_PROCESS4_64
#  define PROCESS8_64 XXH_PROCESS8_64
#else
#  undef XXH_PROCESS1_64
#  undef XXH_PROCESS4_64
#  undef XXH_PROCESS8_64
#endif

XXH_FORCE_INLINE xxh_u64
XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
{
    xxh_u64 h64;
    if (input==NULL) XXH_ASSERT(len == 0);

    if (len>=32) {
        const xxh_u8* const bEnd = input + len;
        const xxh_u8* const limit = bEnd - 31;
        xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
        xxh_u64 v2 = seed + XXH_PRIME64_2;
        xxh_u64 v3 = seed + 0;
        xxh_u64 v4 = seed - XXH_PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
            v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
            v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
            v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
        } while (input<limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64 = seed + XXH_PRIME64_5;
    }

    h64 += (xxh_u64) len;

    return XXH64_finalize(h64, input, len, align);
}


/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, (const xxh_u8*)input, len);
    return XXH64_digest(&state);
#else
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {   /* Input is aligned, let's leverage the speed advantage */
            return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);

#endif
}
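/*
 * Usage sketch (illustrative, not part of the library): the 64-bit one-shot
 * API mirrors XXH32(), with a 64-bit seed and result. `data` and `size`
 * stand for any caller-provided buffer:
 *
 *     XXH64_hash_t h64 = XXH64(data, size, 0);
 */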

/*******   Hash Streaming   *******/

/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

/*! @ingroup xxh64_family */
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
{
    XXH_memcpy(dstState, srcState, sizeof(*dstState));
}

/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
{
    XXH_ASSERT(statePtr != NULL);
    memset(statePtr, 0, sizeof(*statePtr));
    statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
    statePtr->v[1] = seed + XXH_PRIME64_2;
    statePtr->v[2] = seed + 0;
    statePtr->v[3] = seed - XXH_PRIME64_1;
    return XXH_OK;
}

/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH_errorcode
XXH64_update (XXH64_state_t* state, const void* input, size_t len)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;

        state->total_len += len;

        if (state->memsize + len < 32) {   /* fill in tmp buffer */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
            state->memsize += (xxh_u32)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* tmp buffer is full */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
            state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
            state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
            state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
            state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
            p += 32 - state->memsize;
            state->memsize = 0;
        }

        if (p+32 <= bEnd) {
            const xxh_u8* const limit = bEnd - 32;

            do {
                state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
                state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
                state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
                state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
            } while (p<=limit);

        }

        if (p < bEnd) {
            XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }

    return XXH_OK;
}


/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state)
{
    xxh_u64 h64;

    if (state->total_len >= 32) {
        h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
        h64 = XXH64_mergeRound(h64, state->v[0]);
        h64 = XXH64_mergeRound(h64, state->v[1]);
        h64 = XXH64_mergeRound(h64, state->v[2]);
        h64 = XXH64_mergeRound(h64, state->v[3]);
    } else {
        h64 = state->v[2] /*seed*/ + XXH_PRIME64_5;
    }

    h64 += (xxh_u64) state->total_len;

    return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
}


/*******   Canonical representation   *******/

/*! @ingroup xxh64_family */
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    XXH_memcpy(dst, &hash, sizeof(*dst));
}

/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}

#ifndef XXH_NO_XXH3

/* *********************************************************************
 * XXH3
 * New generation hash designed for speed on small keys and vectorization
 ************************************************************************ */
/*!
 * @}
 * @defgroup xxh3_impl XXH3 implementation
 * @ingroup impl
 * @{
 */

/* ===   Compiler specifics   === */

#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
#  define XXH_RESTRICT   /* disable */
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
#  define XXH_RESTRICT   restrict
#else
/* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */
#  define XXH_RESTRICT   /* disable */
#endif

#if (defined(__GNUC__) && (__GNUC__ >= 3))  \
  || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
  || defined(__clang__)
#  define XXH_likely(x) __builtin_expect(x, 1)
#  define XXH_unlikely(x) __builtin_expect(x, 0)
#else
#  define XXH_likely(x) (x)
#  define XXH_unlikely(x) (x)
#endif

#if defined(__GNUC__) || defined(__clang__)
#  if defined(__ARM_NEON__) || defined(__ARM_NEON) \
   || defined(__aarch64__)  || defined(_M_ARM) \
   || defined(_M_ARM64)     || defined(_M_ARM64EC)
#    define inline __inline__  /* circumvent a clang bug */
#    include <arm_neon.h>
#    undef inline
#  elif defined(__AVX2__)
#    include <immintrin.h>
#  elif defined(__SSE2__)
#    include <emmintrin.h>
#  endif
#endif

#if defined(_MSC_VER)
#  include <intrin.h>
#endif

/*
 * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
 * remaining a true 64-bit/128-bit hash function.
 *
 * This is done by prioritizing a subset of 64-bit operations that can be
 * emulated without too many steps on the average 32-bit machine.
 *
 * For example, these two lines seem similar, and run equally fast on 64-bit:
 *
 *   xxh_u64 x;
 *   x ^= (x >> 47); // good
 *   x ^= (x >> 13); // bad
 *
 * However, to a 32-bit machine, there is a major difference.
 *
 * x ^= (x >> 47) looks like this:
 *
 *   x.lo ^= (x.hi >> (47 - 32));
 *
 * while x ^= (x >> 13) looks like this:
 *
 *   // note: funnel shifts are not usually cheap.
 *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
 *   x.hi ^= (x.hi >> 13);
 *
 * The first one is significantly faster than the second, simply because the
 * shift is larger than 32. This means:
 *  - All the bits we need are in the upper 32 bits, so we can ignore the lower
 *    32 bits in the shift.
 *  - The shift result will always fit in the lower 32 bits, and therefore,
 *    we can ignore the upper 32 bits in the xor.
 *
 * Thanks to this optimization, XXH3 only requires these features to be efficient:
 *
 *  - Usable unaligned access
 *  - A 32-bit or 64-bit ALU
 *     - If 32-bit, a decent ADC instruction
 *  - A 32 or 64-bit multiply with a 64-bit result
 *  - For the 128-bit variant, a decent byteswap helps short inputs.
 *
 * The first two are already required by XXH32, and almost all 32-bit and 64-bit
 * platforms which can run XXH32 can run XXH3 efficiently.
 *
 * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
 * notable exception.
 *
 * First of all, Thumb-1 lacks support for the UMULL instruction which
 * performs the important long multiply. This means numerous __aeabi_lmul
 * calls.
 *
 * Second of all, the 8 functional registers are just not enough.
 * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
 * Lo registers, and this shuffling results in thousands more MOVs than A32.
 *
 * A32 and T32 don't have this limitation. They can access all 14 registers,
 * do a 32->64 multiply with UMULL, and the flexible operand allowing free
 * shifts is helpful, too.
 *
 * Therefore, we do a quick sanity check.
 *
 * If compiling Thumb-1 for a target which supports ARM instructions, we will
 * emit a warning, as it is not a "sane" platform to compile for.
 *
 * Usually, if this happens, it is because of an accident and you probably need
 * to specify -march, as you likely meant to compile for a newer architecture.
 *
 * Credit: large sections of the vectorial and asm source code paths
 *         have been contributed by @easyaspi314
 */
#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
#  warning "XXH3 is highly inefficient without ARM or Thumb-2."
#endif

/* ==========================================
 * Vectorization detection
 * ========================================== */

#ifdef XXH_DOXYGEN
/*!
 * @ingroup tuning
 * @brief Overrides the vectorization implementation chosen for XXH3.
 *
 * Can be defined to 0 to disable SIMD or any of the values mentioned in
 * @ref XXH_VECTOR_TYPE.
 *
 * If this is not defined, it uses predefined macros to determine the best
 * implementation.
 */
#  define XXH_VECTOR XXH_SCALAR
/*!
 * @ingroup tuning
 * @brief Possible values for @ref XXH_VECTOR.
 *
 * Note that these are actually implemented as macros.
 *
 * If this is not defined, it is detected automatically.
 * @ref XXH_X86DISPATCH overrides this.
 */
enum XXH_VECTOR_TYPE /* fake enum */ {
    XXH_SCALAR = 0,  /*!< Portable scalar version */
    XXH_SSE2   = 1,  /*!<
                      * SSE2 for Pentium 4, Opteron, all x86_64.
                      *
                      * @note SSE2 is also guaranteed on Windows 10, macOS, and
                      * Android x86.
                      */
    XXH_AVX2   = 2,  /*!< AVX2 for Haswell and Bulldozer */
    XXH_AVX512 = 3,  /*!< AVX512 for Skylake and Icelake */
    XXH_NEON   = 4,  /*!< NEON for most ARMv7-A and all AArch64 */
    XXH_VSX    = 5,  /*!< VSX and ZVector for POWER8/z13 (64-bit) */
};
/*!
 * @ingroup tuning
 * @brief Selects the minimum alignment for XXH3's accumulators.
 *
 * When using SIMD, this should match the alignment required for said vector
 * type, so, for example, 32 for AVX2.
 *
 * Default: Auto detected.
 */
#  define XXH_ACC_ALIGN 8
#endif

/* Actual definition */
#ifndef XXH_DOXYGEN
#  define XXH_SCALAR 0
#  define XXH_SSE2   1
#  define XXH_AVX2   2
#  define XXH_AVX512 3
#  define XXH_NEON   4
#  define XXH_VSX    5
#endif

#ifndef XXH_VECTOR    /* can be defined on command line */
#  if ( \
        defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
     || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
   ) && ( \
        defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
    || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
   )
#    define XXH_VECTOR XXH_NEON
#  elif defined(__AVX512F__)
#    define XXH_VECTOR XXH_AVX512
#  elif defined(__AVX2__)
#    define XXH_VECTOR XXH_AVX2
#  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
#    define XXH_VECTOR XXH_SSE2
#  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
     || (defined(__s390x__) && defined(__VEC__)) \
     && defined(__GNUC__) /* TODO: IBM XL */
#    define XXH_VECTOR XXH_VSX
#  else
#    define XXH_VECTOR XXH_SCALAR
#  endif
#endif
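/*
 * For example, to benchmark the portable code path on SIMD-capable hardware,
 * the detection above can be overridden at build time (illustrative
 * invocation; -DXXH_VECTOR=0 is equivalent, since XXH_SCALAR is defined
 * before its first use in a preprocessor test):
 *
 *     cc -O3 -DXXH_VECTOR=XXH_SCALAR -c xxhash.c
 */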
2881
2882 /*
2883 * Controls the alignment of the accumulator,
2884 * for compatibility with aligned vector loads, which are usually faster.
2885 */
2886 #ifndef XXH_ACC_ALIGN
2887 # if defined(XXH_X86DISPATCH)
2888 # define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
2889 # elif XXH_VECTOR == XXH_SCALAR /* scalar */
2890 # define XXH_ACC_ALIGN 8
2891 # elif XXH_VECTOR == XXH_SSE2 /* sse2 */
2892 # define XXH_ACC_ALIGN 16
2893 # elif XXH_VECTOR == XXH_AVX2 /* avx2 */
2894 # define XXH_ACC_ALIGN 32
2895 # elif XXH_VECTOR == XXH_NEON /* neon */
2896 # define XXH_ACC_ALIGN 16
2897 # elif XXH_VECTOR == XXH_VSX /* vsx */
2898 # define XXH_ACC_ALIGN 16
2899 # elif XXH_VECTOR == XXH_AVX512 /* avx512 */
2900 # define XXH_ACC_ALIGN 64
2901 # endif
2902 #endif
2903
2904 #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
2905 || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
2906 # define XXH_SEC_ALIGN XXH_ACC_ALIGN
2907 #else
2908 # define XXH_SEC_ALIGN 8
2909 #endif
2910
2911 /*
2912 * UGLY HACK:
2913 * GCC usually generates the best code with -O3 for xxHash.
2914 *
2915 * However, when targeting AVX2, it is overzealous in its unrolling resulting
2916 * in code roughly 3/4 the speed of Clang.
2917 *
2918 * There are other issues, such as GCC splitting _mm256_loadu_si256 into
2919 * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
2920 * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
2921 *
2922 * That is why when compiling the AVX2 version, it is recommended to use either
2923 * -O2 -mavx2 -march=haswell
2924 * or
2925 * -O2 -mavx2 -mno-avx256-split-unaligned-load
2926 * for decent performance, or to use Clang instead.
2927 *
2928 * Fortunately, we can control the first one with a pragma that forces GCC into
2929 * -O2, but the other one we can't control without "failed to inline always
2930 * inline function due to target mismatch" warnings.
2931 */
2932 #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
2933 && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
2934 && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
2935 # pragma GCC push_options
2936 # pragma GCC optimize("-O2")
2937 #endif
2938
2939
2940 #if XXH_VECTOR == XXH_NEON
2941 /*
2942 * NEON's setup for vmlal_u32 is a little more complicated than it is on
2943 * SSE2, AVX2, and VSX.
2944 *
2945 * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast.
2946 *
2947 * To do the same operation, the 128-bit 'Q' register needs to be split into
2948 * two 64-bit 'D' registers, performing this operation::
2949 *
2950 * [ a | b ]
2951 * | '---------. .--------' |
2952 * | x |
2953 * | .---------' '--------. |
2954 * [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[ a >> 32 | b >> 32 ]
2955 *
2956 * Due to significant changes in aarch64, the fastest method for aarch64 is
2957 * completely different than the fastest method for ARMv7-A.
2958 *
2959 * ARMv7-A treats D registers as unions overlaying Q registers, so modifying
2960 * D11 will modify the high half of Q5. This is similar to how modifying AH
2961 * will only affect bits 8-15 of AX on x86.
2962 *
2963 * VZIP takes two registers, and puts even lanes in one register and odd lanes
2964 * in the other.
2965 *
2966 * On ARMv7-A, this strangely modifies both parameters in place instead of
2967 * taking the usual 3-operand form.
2968 *
2969 * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the
2970 * lower and upper halves of the Q register to end up with the high and low
2971 * halves where we want - all in one instruction.
2972 *
2973 * vzip.32 d10, d11 @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] }
2974 *
2975 * Unfortunately we need inline assembly for this: Instructions modifying two
2976 * registers at once is not possible in GCC or Clang's IR, and they have to
2977 * create a copy.
2978 *
2979 * aarch64 requires a different approach.
2980 *
2981 * In order to make it easier to write a decent compiler for aarch64, many
2982 * quirks were removed, such as conditional execution.
2983 *
2984 * NEON was also affected by this.
2985 *
2986 * aarch64 cannot access the high bits of a Q-form register, and writes to a
2987 * D-form register zero the high bits, similar to how writes to W-form scalar
2988 * registers (or DWORD registers on x86_64) work.
2989 *
2990 * The formerly free vget_high intrinsics now require a vext (with a few
2991 * exceptions)
2992 *
2993 * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent
2994 * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one
2995 * operand.
2996 *
2997 * The equivalent of the VZIP.32 on the lower and upper halves would be this
2998 * mess:
2999 *
3000 * ext v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
3001 * zip1 v1.2s, v0.2s, v2.2s // v1 = { v0[0], v2[0] }
3002 * zip2 v0.2s, v0.2s, v1.2s // v0 = { v0[1], v2[1] }
3003 *
3004 * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN):
3005 *
3006 * shrn v1.2s, v0.2d, #32 // v1 = (uint32x2_t)(v0 >> 32);
3007 * xtn v0.2s, v0.2d // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
3008 *
3009 * This is available on ARMv7-A, but is less efficient than a single VZIP.32.
3010 */
3011
3012 /*!
3013 * Function-like macro:
3014 * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi)
3015 * {
3016 * outLo = (uint32x2_t)(in & 0xFFFFFFFF);
3017 * outHi = (uint32x2_t)(in >> 32);
3018 * in = UNDEFINED;
3019 * }
3020 */
3021 # if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
3022 && (defined(__GNUC__) || defined(__clang__)) \
3023 && (defined(__arm__) || defined(__thumb__) || defined(_M_ARM))
3024 # define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
3025 do { \
3026 /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \
3027 /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */ \
3028 /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \
3029 __asm__("vzip.32 %e0, %f0" : "+w" (in)); \
3030 (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in)); \
3031 (outHi) = vget_high_u32(vreinterpretq_u32_u64(in)); \
3032 } while (0)
3033 # else
3034 # define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
3035 do { \
3036 (outLo) = vmovn_u64 (in); \
3037 (outHi) = vshrn_n_u64 ((in), 32); \
3038 } while (0)
3039 # endif
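/*
 * Illustration (not part of the library): the scalar equivalent of the split,
 * shown on a single hypothetical lane value. Each 64-bit lane of `in` is cut
 * into its low and high 32-bit halves:
 *
 *     uint64_t lane = 0x0123456789ABCDEFULL;
 *     uint32_t lo = (uint32_t)(lane & 0xFFFFFFFF);  // 0x89ABCDEF
 *     uint32_t hi = (uint32_t)(lane >> 32);         // 0x01234567
 *
 * XXH_SPLIT_IN_PLACE performs this for both lanes at once, so that vmlal_u32
 * can then compute lo * hi per lane as a 32x32->64 multiply-accumulate.
 */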
3040
3041 /*!
3042 * @ingroup tuning
3043 * @brief Controls the NEON to scalar ratio for XXH3
3044 *
3045 * On AArch64 when not optimizing for size, XXH3 will run 6 lanes using NEON and
3046 * 2 lanes on scalar by default.
3047 *
3048 * This can be set to 2, 4, 6, or 8. ARMv7 will default to all 8 NEON lanes, as the
3049 * emulated 64-bit arithmetic is too slow.
3050 *
3051 * Modern ARM CPUs are _very_ sensitive to how their pipelines are used.
3052 *
3053 * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but it can't
3054 * have more than 2 NEON (F0/F1) micro-ops. If you are only using NEON instructions,
3055 * you are only using 2/3 of the CPU bandwidth.
3056 *
3057  * This is even more noticeable on the more advanced cores like the A76 which
3058 * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
3059 *
3060 * Therefore, @ref XXH3_NEON_LANES lanes will be processed using NEON, and the
3061 * remaining lanes will use scalar instructions. This improves the bandwidth
3062 * and also gives the integer pipelines something to do besides twiddling loop
3063 * counters and pointers.
3064 *
3065 * This change benefits CPUs with large micro-op buffers without negatively affecting
3066 * other CPUs:
3067 *
3068 * | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. |
3069 * |:----------------------|:--------------------|----------:|-----------:|------:|
3070 * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% |
3071 * | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% |
3072 * | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% |
3073 *
3074  * It also seems to fix some bad codegen on GCC, making it almost as fast as Clang.
3075 *
3076 * @see XXH3_accumulate_512_neon()
3077 */
3078 # ifndef XXH3_NEON_LANES
3079 # if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
3080 && !defined(__OPTIMIZE_SIZE__)
3081 # define XXH3_NEON_LANES 6
3082 # else
3083 # define XXH3_NEON_LANES XXH_ACC_NB
3084 # endif
3085 # endif
3086 #endif /* XXH_VECTOR == XXH_NEON */
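/*
 * Usage sketch (assuming the bundled xxhash.c translation unit and a
 * GCC/Clang-style driver): the NEON-to-scalar ratio can be tuned at build
 * time by defining the macro before compilation, e.g.
 *
 *     cc -O3 -DXXH3_NEON_LANES=4 -c xxhash.c
 *
 * which would process 4 lanes with NEON and the remaining 4 lanes with
 * scalar instructions.
 */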
3087
3088 /*
3089 * VSX and Z Vector helpers.
3090 *
3091 * This is very messy, and any pull requests to clean this up are welcome.
3092 *
3093 * There are a lot of problems with supporting VSX and s390x, due to
3094 * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
3095 */
3096 #if XXH_VECTOR == XXH_VSX
3097 # if defined(__s390x__)
3098 # include <s390intrin.h>
3099 # else
3100 /* GCC's altivec.h can have the unwanted consequence of unconditionally
3101  * #defining the bool, vector, and pixel keywords as macros,
3102  * which breaks programs already using these keywords for other purposes.
3103 * The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined.
3104 * __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler,
3105 * but it seems that, in some cases, it isn't.
3106 * Force the build macro to be defined, so that keywords are not altered.
3107 */
3108 # if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
3109 # define __APPLE_ALTIVEC__
3110 # endif
3111 # include <altivec.h>
3112 # endif
3113
3114 typedef __vector unsigned long long xxh_u64x2;
3115 typedef __vector unsigned char xxh_u8x16;
3116 typedef __vector unsigned xxh_u32x4;
3117
3118 # ifndef XXH_VSX_BE
3119 # if defined(__BIG_ENDIAN__) \
3120 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
3121 # define XXH_VSX_BE 1
3122 # elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
3123 # warning "-maltivec=be is not recommended. Please use native endianness."
3124 # define XXH_VSX_BE 1
3125 # else
3126 # define XXH_VSX_BE 0
3127 # endif
3128 # endif /* !defined(XXH_VSX_BE) */
3129
3130 # if XXH_VSX_BE
3131 # if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
3132 # define XXH_vec_revb vec_revb
3133 # else
3134 /*!
3135 * A polyfill for POWER9's vec_revb().
3136 */
3137 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
3138 {
3139 xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
3140 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
3141 return vec_perm(val, val, vByteSwap);
3142 }
3143 # endif
3144 # endif /* XXH_VSX_BE */
3145
3146 /*!
3147 * Performs an unaligned vector load and byte swaps it on big endian.
3148 */
3149 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
3150 {
3151 xxh_u64x2 ret;
3152 XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
3153 # if XXH_VSX_BE
3154 ret = XXH_vec_revb(ret);
3155 # endif
3156 return ret;
3157 }
3158
3159 /*
3160  * vec_mulo and vec_mule are very problematic intrinsics on PowerPC.
3161  *
3162  * These intrinsics weren't added until GCC 8, despite existing for a while,
3163  * and they are endian-dependent. Their meanings also swap depending on the version.
3164  */
3165 # if defined(__s390x__)
3166 /* s390x is always big endian, no issue on this platform */
3167 # define XXH_vec_mulo vec_mulo
3168 # define XXH_vec_mule vec_mule
3169 # elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
3170 /* Clang has a better way to control this: we can just use the builtin, which doesn't swap. */
3171 # define XXH_vec_mulo __builtin_altivec_vmulouw
3172 # define XXH_vec_mule __builtin_altivec_vmuleuw
3173 # else
3174 /* gcc needs inline assembly */
3175 /* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
3176 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
3177 {
3178 xxh_u64x2 result;
3179 __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3180 return result;
3181 }
3182 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
3183 {
3184 xxh_u64x2 result;
3185 __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3186 return result;
3187 }
3188 # endif /* XXH_vec_mulo, XXH_vec_mule */
3189 #endif /* XXH_VECTOR == XXH_VSX */
3190
3191
3192 /* prefetch
3193 * can be disabled, by declaring XXH_NO_PREFETCH build macro */
3194 #if defined(XXH_NO_PREFETCH)
3195 # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
3196 #else
3197 # if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
3198 # include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
3199 # define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
3200 # elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
3201 # define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
3202 # else
3203 # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
3204 # endif
3205 #endif /* XXH_NO_PREFETCH */
3206
3207
3208 /* ==========================================
3209 * XXH3 default settings
3210 * ========================================== */
3211
3212 #define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
3213
3214 #if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
3215 # error "default keyset is not large enough"
3216 #endif
3217
3218 /*! Pseudorandom secret taken directly from FARSH. */
3219 XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
3220 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
3221 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
3222 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
3223 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
3224 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
3225 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
3226 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
3227 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
3228 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
3229 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
3230 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
3231 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
3232 };
3233
3234
3235 #ifdef XXH_OLD_NAMES
3236 # define kSecret XXH3_kSecret
3237 #endif
3238
3239 #ifdef XXH_DOXYGEN
3240 /*!
3241 * @brief Calculates a 32-bit to 64-bit long multiply.
3242 *
3243 * Implemented as a macro.
3244 *
3245 * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
3246  * need to (but it shouldn't need to anyway, it is about 7 instructions to do
3247 * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
3248 * use that instead of the normal method.
3249 *
3250 * If you are compiling for platforms like Thumb-1 and don't have a better option,
3251 * you may also want to write your own long multiply routine here.
3252 *
3253 * @param x, y Numbers to be multiplied
3254 * @return 64-bit product of the low 32 bits of @p x and @p y.
3255 */
3256 XXH_FORCE_INLINE xxh_u64
3257 XXH_mult32to64(xxh_u64 x, xxh_u64 y)
3258 {
3259 return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
3260 }
3261 #elif defined(_MSC_VER) && defined(_M_IX86)
3262 # define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
3263 #else
3264 /*
3265 * Downcast + upcast is usually better than masking on older compilers like
3266 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
3267 *
3268 * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
3269 * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
3270 */
3271 # define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
3272 #endif
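/*
 * Illustration (not part of the library): both forms above compute the same
 * value, the full 64-bit product of two 32-bit operands. With hypothetical
 * inputs:
 *
 *     xxh_u64 const a = 0xFFFFFFFFULL;        // 2^32 - 1, the largest operand
 *     xxh_u64 const p = XXH_mult32to64(a, a); // 0xFFFFFFFE00000001
 *
 * (2^32 - 1)^2 = 2^64 - 2^33 + 1 = 0xFFFFFFFE00000001 still fits in 64 bits:
 * a 32x32->64 multiply can never overflow.
 */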
3273
3274 /*!
3275 * @brief Calculates a 64->128-bit long multiply.
3276 *
3277 * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
3278 * version.
3279 *
3280 * @param lhs , rhs The 64-bit integers to be multiplied
3281 * @return The 128-bit result represented in an @ref XXH128_hash_t.
3282 */
3283 static XXH128_hash_t
3284 XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
3285 {
3286 /*
3287 * GCC/Clang __uint128_t method.
3288 *
3289 * On most 64-bit targets, GCC and Clang define a __uint128_t type.
3290 * This is usually the best way as it usually uses a native long 64-bit
3291 * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
3292 *
3293 * Usually.
3294 *
3295  * Despite targeting 32-bit platforms, Clang (and Emscripten) define this
3296  * type even though the native arithmetic for it is missing. This results in
3297  * a slow compiler-builtin call which calculates a full 128-bit multiply.
3298  * In that case it is best to use the portable one.
3299 * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
3300 */
3301 #if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
3302 && defined(__SIZEOF_INT128__) \
3303 || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
3304
3305 __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
3306 XXH128_hash_t r128;
3307 r128.low64 = (xxh_u64)(product);
3308 r128.high64 = (xxh_u64)(product >> 64);
3309 return r128;
3310
3311 /*
3312 * MSVC for x64's _umul128 method.
3313 *
3314 * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
3315 *
3316 * This compiles to single operand MUL on x64.
3317 */
3318 #elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)
3319
3320 #ifndef _MSC_VER
3321 # pragma intrinsic(_umul128)
3322 #endif
3323 xxh_u64 product_high;
3324 xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
3325 XXH128_hash_t r128;
3326 r128.low64 = product_low;
3327 r128.high64 = product_high;
3328 return r128;
3329
3330 /*
3331 * MSVC for ARM64's __umulh method.
3332 *
3333 * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
3334 */
3335 #elif defined(_M_ARM64) || defined(_M_ARM64EC)
3336
3337 #ifndef _MSC_VER
3338 # pragma intrinsic(__umulh)
3339 #endif
3340 XXH128_hash_t r128;
3341 r128.low64 = lhs * rhs;
3342 r128.high64 = __umulh(lhs, rhs);
3343 return r128;
3344
3345 #else
3346 /*
3347 * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
3348 *
3349 * This is a fast and simple grade school multiply, which is shown below
3350 * with base 10 arithmetic instead of base 0x100000000.
3351 *
3352 * 9 3 // D2 lhs = 93
3353 * x 7 5 // D2 rhs = 75
3354 * ----------
3355 * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
3356 * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
3357 * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
3358 * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
3359 * ---------
3360 * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
3361 * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
3362 * ---------
3363 * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
3364 *
3365 * The reasons for adding the products like this are:
3366 * 1. It avoids manual carry tracking. Just like how
3367 * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
3368 * This avoids a lot of complexity.
3369 *
3370 * 2. It hints for, and on Clang, compiles to, the powerful UMAAL
3371 * instruction available in ARM's Digital Signal Processing extension
3372 * in 32-bit ARMv6 and later, which is shown below:
3373 *
3374 * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
3375 * {
3376 * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
3377 * *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
3378 * *RdHi = (xxh_u32)(product >> 32);
3379 * }
3380 *
3381 * This instruction was designed for efficient long multiplication, and
3382 * allows this to be calculated in only 4 instructions at speeds
3383 * comparable to some 64-bit ALUs.
3384 *
3385 * 3. It isn't terrible on other platforms. Usually this will be a couple
3386 * of 32-bit ADD/ADCs.
3387 */
3388
3389 /* First calculate all of the cross products. */
3390 xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
3391 xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
3392 xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
3393 xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
3394
3395 /* Now add the products together. These will never overflow. */
3396 xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
3397 xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
3398 xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
3399
3400 XXH128_hash_t r128;
3401 r128.low64 = lower;
3402 r128.high64 = upper;
3403 return r128;
3404 #endif
3405 }
3406
3407 /*!
3408 * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
3409 *
3410 * The reason for the separate function is to prevent passing too many structs
3411 * around by value. This will hopefully inline the multiply, but we don't force it.
3412 *
3413 * @param lhs , rhs The 64-bit integers to multiply
3414 * @return The low 64 bits of the product XOR'd by the high 64 bits.
3415 * @see XXH_mult64to128()
3416 */
3417 static xxh_u64
3418 XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
3419 {
3420 XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
3421 return product.low64 ^ product.high64;
3422 }
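/*
 * Sanity-check sketch (not part of the library), using the identity
 * 2^32 * 2^32 = 2^64, i.e. low64 == 0 and high64 == 1:
 *
 *     xxh_u64 const folded = XXH3_mul128_fold64(0x100000000ULL, 0x100000000ULL);
 *     // XXH_mult64to128 yields { .low64 = 0, .high64 = 1 },
 *     // so folded == (0 ^ 1) == 1
 */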
3423
3424 /*! Seems to produce slightly better code on GCC for some reason. */
3425 XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
3426 {
3427 XXH_ASSERT(0 <= shift && shift < 64);
3428 return v64 ^ (v64 >> shift);
3429 }
3430
3431 /*
3432 * This is a fast avalanche stage,
3433 * suitable when input bits are already partially mixed
3434 */
3435 static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
3436 {
3437 h64 = XXH_xorshift64(h64, 37);
3438 h64 *= 0x165667919E3779F9ULL;
3439 h64 = XXH_xorshift64(h64, 32);
3440 return h64;
3441 }
3442
3443 /*
3444 * This is a stronger avalanche,
3445 * inspired by Pelle Evensen's rrmxmx
3446 * preferable when input has not been previously mixed
3447 */
3448 static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
3449 {
3450 /* this mix is inspired by Pelle Evensen's rrmxmx */
3451 h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
3452 h64 *= 0x9FB21C651E98DF25ULL;
3453 h64 ^= (h64 >> 35) + len ;
3454 h64 *= 0x9FB21C651E98DF25ULL;
3455 return XXH_xorshift64(h64, 28);
3456 }
3457
3458
3459 /* ==========================================
3460 * Short keys
3461 * ==========================================
3462 * One of the shortcomings of XXH32 and XXH64 was that their performance was
3463  * sub-optimal on short lengths. They used an iterative algorithm which strongly
3464 * favored lengths that were a multiple of 4 or 8.
3465 *
3466  * Instead of iterating over individual inputs, we use a set of single-shot
3467 * functions which piece together a range of lengths and operate in constant time.
3468 *
3469 * Additionally, the number of multiplies has been significantly reduced. This
3470 * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
3471 *
3472 * Depending on the platform, this may or may not be faster than XXH32, but it
3473 * is almost guaranteed to be faster than XXH64.
3474 */
3475
3476 /*
3477 * At very short lengths, there isn't enough input to fully hide secrets, or use
3478 * the entire secret.
3479 *
3480 * There is also only a limited amount of mixing we can do before significantly
3481 * impacting performance.
3482 *
3483 * Therefore, we use different sections of the secret and always mix two secret
3484 * samples with an XOR. This should have no effect on performance on the
3485 * seedless or withSeed variants because everything _should_ be constant folded
3486 * by modern compilers.
3487 *
3488 * The XOR mixing hides individual parts of the secret and increases entropy.
3489 *
3490 * This adds an extra layer of strength for custom secrets.
3491 */
3492 XXH_FORCE_INLINE XXH64_hash_t
3493 XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3494 {
3495 XXH_ASSERT(input != NULL);
3496 XXH_ASSERT(1 <= len && len <= 3);
3497 XXH_ASSERT(secret != NULL);
3498 /*
3499 * len = 1: combined = { input[0], 0x01, input[0], input[0] }
3500 * len = 2: combined = { input[1], 0x02, input[0], input[1] }
3501 * len = 3: combined = { input[2], 0x03, input[0], input[1] }
3502 */
3503 { xxh_u8 const c1 = input[0];
3504 xxh_u8 const c2 = input[len >> 1];
3505 xxh_u8 const c3 = input[len - 1];
3506 xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
3507 | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
3508 xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
3509 xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
3510 return XXH64_avalanche(keyed);
3511 }
3512 }
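/*
 * Worked example (not part of the library): for a hypothetical 2-byte input
 * { 0xAB, 0xCD }:
 *
 *     c1 = input[0] = 0xAB
 *     c2 = input[1] = 0xCD    // len >> 1 == 1
 *     c3 = input[1] = 0xCD    // len - 1  == 1
 *     combined = (0xAB << 16) | (0xCD << 24) | 0xCD | (2 << 8) = 0xCDAB02CD
 *
 * Because the length byte is packed into the word, equal byte sequences of
 * different lengths produce different `combined` values.
 */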
3513
3514 XXH_FORCE_INLINE XXH64_hash_t
3515 XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3516 {
3517 XXH_ASSERT(input != NULL);
3518 XXH_ASSERT(secret != NULL);
3519 XXH_ASSERT(4 <= len && len <= 8);
3520 seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
3521 { xxh_u32 const input1 = XXH_readLE32(input);
3522 xxh_u32 const input2 = XXH_readLE32(input + len - 4);
3523 xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
3524 xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
3525 xxh_u64 const keyed = input64 ^ bitflip;
3526 return XXH3_rrmxmx(keyed, len);
3527 }
3528 }
3529
3530 XXH_FORCE_INLINE XXH64_hash_t
3531 XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3532 {
3533 XXH_ASSERT(input != NULL);
3534 XXH_ASSERT(secret != NULL);
3535 XXH_ASSERT(9 <= len && len <= 16);
3536 { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
3537 xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
3538 xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
3539 xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
3540 xxh_u64 const acc = len
3541 + XXH_swap64(input_lo) + input_hi
3542 + XXH3_mul128_fold64(input_lo, input_hi);
3543 return XXH3_avalanche(acc);
3544 }
3545 }
3546
3547 XXH_FORCE_INLINE XXH64_hash_t
3548 XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3549 {
3550 XXH_ASSERT(len <= 16);
3551 { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed);
3552 if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
3553 if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
3554 return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
3555 }
3556 }
3557
3558 /*
3559 * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
3560 * multiplication by zero, affecting hashes of lengths 17 to 240.
3561 *
3562 * However, they are very unlikely.
3563 *
3564 * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
3565 * unseeded non-cryptographic hashes, it does not attempt to defend itself
3566 * against specially crafted inputs, only random inputs.
3567 *
3568 * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes
3569 * cancelling out the secret is taken an arbitrary number of times (addressed
3570 * in XXH3_accumulate_512), this collision is very unlikely with random inputs
3571 * and/or proper seeding:
3572 *
3573 * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
3574 * function that is only called up to 16 times per hash with up to 240 bytes of
3575 * input.
3576 *
3577 * This is not too bad for a non-cryptographic hash function, especially with
3578 * only 64 bit outputs.
3579 *
3580 * The 128-bit variant (which trades some speed for strength) is NOT affected
3581 * by this, although it is always a good idea to use a proper seed if you care
3582 * about strength.
3583 */
3584 XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
3585 const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
3586 {
3587 #if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
3588 && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \
3589 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */
3590 /*
3591 * UGLY HACK:
3592 * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
3593 * slower code.
3594 *
3595 * By forcing seed64 into a register, we disrupt the cost model and
3596 * cause it to scalarize. See `XXH32_round()`
3597 *
3598 * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
3599 * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
3600 * GCC 9.2, despite both emitting scalar code.
3601 *
3602 * GCC generates much better scalar code than Clang for the rest of XXH3,
3603  * which is why finding a more optimal codepath is of interest.
3604 */
3605 XXH_COMPILER_GUARD(seed64);
3606 #endif
3607 { xxh_u64 const input_lo = XXH_readLE64(input);
3608 xxh_u64 const input_hi = XXH_readLE64(input+8);
3609 return XXH3_mul128_fold64(
3610 input_lo ^ (XXH_readLE64(secret) + seed64),
3611 input_hi ^ (XXH_readLE64(secret+8) - seed64)
3612 );
3613 }
3614 }
3615
3616 /* For mid range keys, XXH3 uses a Mum-hash variant. */
3617 XXH_FORCE_INLINE XXH64_hash_t
3618 XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
3619 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
3620 XXH64_hash_t seed)
3621 {
3622 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
3623 XXH_ASSERT(16 < len && len <= 128);
3624
3625 { xxh_u64 acc = len * XXH_PRIME64_1;
3626 if (len > 32) {
3627 if (len > 64) {
3628 if (len > 96) {
3629 acc += XXH3_mix16B(input+48, secret+96, seed);
3630 acc += XXH3_mix16B(input+len-64, secret+112, seed);
3631 }
3632 acc += XXH3_mix16B(input+32, secret+64, seed);
3633 acc += XXH3_mix16B(input+len-48, secret+80, seed);
3634 }
3635 acc += XXH3_mix16B(input+16, secret+32, seed);
3636 acc += XXH3_mix16B(input+len-32, secret+48, seed);
3637 }
3638 acc += XXH3_mix16B(input+0, secret+0, seed);
3639 acc += XXH3_mix16B(input+len-16, secret+16, seed);
3640
3641 return XXH3_avalanche(acc);
3642 }
3643 }
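/*
 * Illustration (not part of the library): the branches above cover the input
 * with symmetric, possibly overlapping 16-byte pairs. For a hypothetical
 * len == 100, the eight XXH3_mix16B calls read these input offsets:
 *
 *     0..15  and 84..99   (always)
 *     16..31 and 68..83   (len > 32)
 *     32..47 and 52..67   (len > 64)
 *     48..63 and 36..51   (len > 96)
 *
 * Every input byte is consumed at least once, and each call uses a distinct
 * 16-byte slice of the secret.
 */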
3644
3645 #define XXH3_MIDSIZE_MAX 240
3646
3647 XXH_NO_INLINE XXH64_hash_t
3648 XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
3649 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
3650 XXH64_hash_t seed)
3651 {
3652 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
3653 XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
3654
3655 #define XXH3_MIDSIZE_STARTOFFSET 3
3656 #define XXH3_MIDSIZE_LASTOFFSET 17
3657
3658 { xxh_u64 acc = len * XXH_PRIME64_1;
3659 int const nbRounds = (int)len / 16;
3660 int i;
3661 for (i=0; i<8; i++) {
3662 acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
3663 }
3664 acc = XXH3_avalanche(acc);
3665 XXH_ASSERT(nbRounds >= 8);
3666 #if defined(__clang__) /* Clang */ \
3667 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
3668 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
3669 /*
3670 * UGLY HACK:
3671 * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
3672  * Everywhere else, it uses scalar code.
3673 *
3674 * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
3675 * would still be slower than UMAAL (see XXH_mult64to128).
3676 *
3677 * Unfortunately, Clang doesn't handle the long multiplies properly and
3678 * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
3679 * scalarized into an ugly mess of VMOV.32 instructions.
3680 *
3681 * This mess is difficult to avoid without turning autovectorization
3682  * off completely, but they are usually relatively minor and/or not
3683  * worth fixing.
3684 *
3685 * This loop is the easiest to fix, as unlike XXH32, this pragma
3686 * _actually works_ because it is a loop vectorization instead of an
3687 * SLP vectorization.
3688 */
3689 #pragma clang loop vectorize(disable)
3690 #endif
3691 for (i=8 ; i < nbRounds; i++) {
3692 acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
3693 }
3694 /* last bytes */
3695 acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
3696 return XXH3_avalanche(acc);
3697 }
3698 }
3699
3700
3701 /* ======= Long Keys ======= */
3702
3703 #define XXH_STRIPE_LEN 64
3704 #define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */
3705 #define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
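/* With a 64-byte stripe and 64-bit accumulators, XXH_ACC_NB evaluates to
 * 64 / 8 = 8 lanes. */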
3706
3707 #ifdef XXH_OLD_NAMES
3708 # define STRIPE_LEN XXH_STRIPE_LEN
3709 # define ACC_NB XXH_ACC_NB
3710 #endif
3711
3712 XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
3713 {
3714 if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
3715 XXH_memcpy(dst, &v64, sizeof(v64));
3716 }
3717
3718 /* Several intrinsic functions below are supposed to accept __int64 as an argument,
3719  * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
3720  * However, several environments do not define the __int64 type,
3721  * requiring a workaround.
3722 */
3723 #if !defined (__VMS) \
3724 && (defined (__cplusplus) \
3725 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
3726 typedef int64_t xxh_i64;
3727 #else
3728 /* the following type must have a width of 64-bit */
3729 typedef long long xxh_i64;
3730 #endif
3731
3732
3733 /*
3734 * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
3735 *
3736  * It is a hardened version of UMAC, based on FARSH's implementation.
3737 *
3738 * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
3739 * implementations, and it is ridiculously fast.
3740 *
3741 * We harden it by mixing the original input to the accumulators as well as the product.
3742 *
3743 * This means that in the (relatively likely) case of a multiply by zero, the
3744 * original input is preserved.
3745 *
3746 * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
3747 * cross-pollination, as otherwise the upper and lower halves would be
3748 * essentially independent.
3749 *
3750 * This doesn't matter on 64-bit hashes since they all get merged together in
3751 * the end, so we skip the extra step.
3752 *
3753 * Both XXH3_64bits and XXH3_128bits use this subroutine.
3754 */
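/*
 * Illustration (not part of the library): per 64-bit lane, the accumulate
 * step reduces to the scalar form used by XXH3_scalarRound() further below:
 *
 *     xxh_u64 const data = XXH_readLE64(input  + 8*lane);
 *     xxh_u64 const key  = XXH_readLE64(secret + 8*lane);
 *     xxh_u64 const dk   = data ^ key;
 *     acc[lane ^ 1] += data;  // preserve the raw input in the adjacent lane
 *     acc[lane]     += XXH_mult32to64(dk & 0xFFFFFFFF, dk >> 32);
 *
 * Even when the 32x32->64 multiply returns 0 (one half of dk is zero), the
 * raw input still reaches an accumulator.
 */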
3755
3756 #if (XXH_VECTOR == XXH_AVX512) \
3757 || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
3758
3759 #ifndef XXH_TARGET_AVX512
3760 # define XXH_TARGET_AVX512 /* disable attribute target */
3761 #endif
3762
3763 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3764 XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
3765 const void* XXH_RESTRICT input,
3766 const void* XXH_RESTRICT secret)
3767 {
3768 __m512i* const xacc = (__m512i *) acc;
3769 XXH_ASSERT((((size_t)acc) & 63) == 0);
3770 XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
3771
3772 {
3773 /* data_vec = input[0]; */
3774 __m512i const data_vec = _mm512_loadu_si512 (input);
3775 /* key_vec = secret[0]; */
3776 __m512i const key_vec = _mm512_loadu_si512 (secret);
3777 /* data_key = data_vec ^ key_vec; */
3778 __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
3779 /* data_key_lo = data_key >> 32; */
3780 __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
3781 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3782 __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo);
3783 /* xacc[0] += swap(data_vec); */
3784 __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
3785 __m512i const sum = _mm512_add_epi64(*xacc, data_swap);
3786 /* xacc[0] += product; */
3787 *xacc = _mm512_add_epi64(product, sum);
3788 }
3789 }
3790
3791 /*
3792 * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
3793 *
3794 * Multiplication isn't perfect, as explained by Google in HighwayHash:
3795 *
3796 * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
3797 * // varying degrees. In descending order of goodness, bytes
3798 * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
3799 * // As expected, the upper and lower bytes are much worse.
3800 *
3801 * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
3802 *
3803 * Since our algorithm uses a pseudorandom secret to add some variance into the
3804 * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
3805 *
3806 * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
3807 * extraction.
3808 *
3809 * Both XXH3_64bits and XXH3_128bits use this subroutine.
3810 */
3811
3812 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3813 XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3814 {
3815 XXH_ASSERT((((size_t)acc) & 63) == 0);
3816 XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
3817 { __m512i* const xacc = (__m512i*) acc;
3818 const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
3819
3820 /* xacc[0] ^= (xacc[0] >> 47) */
3821 __m512i const acc_vec = *xacc;
3822 __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47);
3823 __m512i const data_vec = _mm512_xor_si512 (acc_vec, shifted);
3824 /* xacc[0] ^= secret; */
3825 __m512i const key_vec = _mm512_loadu_si512 (secret);
3826 __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
3827
3828 /* xacc[0] *= XXH_PRIME32_1; */
3829 __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
3830 __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32);
3831 __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32);
3832 *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
3833 }
3834 }
3835
3836 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3837 XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3838 {
3839 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
3840 XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
3841 XXH_ASSERT(((size_t)customSecret & 63) == 0);
3842 (void)(&XXH_writeLE64);
3843 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
3844 __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64));
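        /* Lane mask 0xAA = 0b10101010: odd 64-bit lanes (1,3,5,7) receive
         * -seed64, even lanes receive +seed64, matching the +seed/-seed
         * alternation of XXH3_initCustomSecret_scalar(). */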
3845
3846 const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret);
3847 __m512i* const dest = ( __m512i*) customSecret;
3848 int i;
3849 XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
3850 XXH_ASSERT(((size_t)dest & 63) == 0);
3851 for (i=0; i < nbRounds; ++i) {
3852  /* GCC has a bug where _mm512_stream_load_si512 accepts 'void*', not
3853  * 'void const*'; without this union, it would warn "discards 'const' qualifier". */
3854 union {
3855 const __m512i* cp;
3856 void* p;
3857 } remote_const_void;
3858 remote_const_void.cp = src + i;
3859 dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
3860 } }
3861 }
3862
3863 #endif
3864
3865 #if (XXH_VECTOR == XXH_AVX2) \
3866 || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
3867
3868 #ifndef XXH_TARGET_AVX2
3869 # define XXH_TARGET_AVX2 /* disable attribute target */
3870 #endif
3871
3872 XXH_FORCE_INLINE XXH_TARGET_AVX2 void
3873 XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
3874 const void* XXH_RESTRICT input,
3875 const void* XXH_RESTRICT secret)
3876 {
3877 XXH_ASSERT((((size_t)acc) & 31) == 0);
3878 { __m256i* const xacc = (__m256i *) acc;
3879 /* Unaligned. This is mainly for pointer arithmetic, and because
3880 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3881 const __m256i* const xinput = (const __m256i *) input;
3882 /* Unaligned. This is mainly for pointer arithmetic, and because
3883 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3884 const __m256i* const xsecret = (const __m256i *) secret;
3885
3886 size_t i;
3887 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
3888 /* data_vec = xinput[i]; */
3889 __m256i const data_vec = _mm256_loadu_si256 (xinput+i);
3890 /* key_vec = xsecret[i]; */
3891 __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
3892 /* data_key = data_vec ^ key_vec; */
3893 __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
3894 /* data_key_lo = data_key >> 32; */
3895 __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3896 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3897 __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo);
3898 /* xacc[i] += swap(data_vec); */
3899 __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
3900 __m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
3901 /* xacc[i] += product; */
3902 xacc[i] = _mm256_add_epi64(product, sum);
3903 } }
3904 }
3905
3906 XXH_FORCE_INLINE XXH_TARGET_AVX2 void
3907 XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3908 {
3909 XXH_ASSERT((((size_t)acc) & 31) == 0);
3910 { __m256i* const xacc = (__m256i*) acc;
3911 /* Unaligned. This is mainly for pointer arithmetic, and because
3912 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3913 const __m256i* const xsecret = (const __m256i *) secret;
3914 const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
3915
3916 size_t i;
3917 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
3918 /* xacc[i] ^= (xacc[i] >> 47) */
3919 __m256i const acc_vec = xacc[i];
3920 __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47);
3921 __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted);
3922 /* xacc[i] ^= xsecret; */
3923 __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
3924 __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
3925
3926 /* xacc[i] *= XXH_PRIME32_1; */
3927 __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3928 __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32);
3929 __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32);
3930 xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
3931 }
3932 }
3933 }
3934
3935 XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3936 {
3937 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
3938 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
3939 XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
3940 (void)(&XXH_writeLE64);
3941 XXH_PREFETCH(customSecret);
3942 { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
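        /* Note: _mm256_set_epi64x lists lanes from highest to lowest, so lanes
         * (0,1,2,3) hold (+seed64, -seed64, +seed64, -seed64): the same
         * alternating pattern as the scalar and AVX512 variants. */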
3943
3944 const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret);
3945 __m256i* dest = ( __m256i*) customSecret;
3946
3947 # if defined(__GNUC__) || defined(__clang__)
3948 /*
3949  * On GCC & Clang, marking 'dest' as modified causes the compiler to:
3950  * - not extract the secret from SSE registers in the internal loop
3951  * - use fewer registers, avoiding pushes of these registers onto the stack
3952 */
3953 XXH_COMPILER_GUARD(dest);
3954 # endif
3955 XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
3956 XXH_ASSERT(((size_t)dest & 31) == 0);
3957
3958  /* GCC -O2 needs the loop unrolled manually */
3959 dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed);
3960 dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed);
3961 dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed);
3962 dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed);
3963 dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed);
3964 dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed);
3965 }
3966 }
3967
3968 #endif
3969
3970 /* x86dispatch always generates SSE2 */
3971 #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
3972
3973 #ifndef XXH_TARGET_SSE2
3974 # define XXH_TARGET_SSE2 /* disable attribute target */
3975 #endif
3976
3977 XXH_FORCE_INLINE XXH_TARGET_SSE2 void
3978 XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
3979 const void* XXH_RESTRICT input,
3980 const void* XXH_RESTRICT secret)
3981 {
3982 /* SSE2 is just a half-scale version of the AVX2 version. */
3983 XXH_ASSERT((((size_t)acc) & 15) == 0);
3984 { __m128i* const xacc = (__m128i *) acc;
3985 /* Unaligned. This is mainly for pointer arithmetic, and because
3986 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3987 const __m128i* const xinput = (const __m128i *) input;
3988 /* Unaligned. This is mainly for pointer arithmetic, and because
3989 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3990 const __m128i* const xsecret = (const __m128i *) secret;
3991
3992 size_t i;
3993 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
3994 /* data_vec = xinput[i]; */
3995 __m128i const data_vec = _mm_loadu_si128 (xinput+i);
3996 /* key_vec = xsecret[i]; */
3997 __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
3998 /* data_key = data_vec ^ key_vec; */
3999 __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
4000 /* data_key_lo = data_key >> 32; */
4001 __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
4002 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
4003 __m128i const product = _mm_mul_epu32 (data_key, data_key_lo);
4004 /* xacc[i] += swap(data_vec); */
4005 __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
4006 __m128i const sum = _mm_add_epi64(xacc[i], data_swap);
4007 /* xacc[i] += product; */
4008 xacc[i] = _mm_add_epi64(product, sum);
4009 } }
4010 }
4011
4012 XXH_FORCE_INLINE XXH_TARGET_SSE2 void
4013 XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4014 {
4015 XXH_ASSERT((((size_t)acc) & 15) == 0);
4016 { __m128i* const xacc = (__m128i*) acc;
4017 /* Unaligned. This is mainly for pointer arithmetic, and because
4018 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
4019 const __m128i* const xsecret = (const __m128i *) secret;
4020 const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
4021
4022 size_t i;
4023 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
4024 /* xacc[i] ^= (xacc[i] >> 47) */
4025 __m128i const acc_vec = xacc[i];
4026 __m128i const shifted = _mm_srli_epi64 (acc_vec, 47);
4027 __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted);
4028 /* xacc[i] ^= xsecret[i]; */
4029 __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
4030 __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
4031
4032 /* xacc[i] *= XXH_PRIME32_1; */
4033 __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
4034 __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32);
4035 __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32);
4036 xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
4037 }
4038 }
4039 }
4040
4041 XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4042 {
4043 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
4044 (void)(&XXH_writeLE64);
4045 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
4046
4047 # if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
4048  /* MSVC 32-bit mode does not support _mm_set_epi64x before 2015 */
4049 XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
4050 __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
4051 # else
4052 __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
4053 # endif
4054 int i;
4055
4056 const void* const src16 = XXH3_kSecret;
4057 __m128i* dst16 = (__m128i*) customSecret;
4058 # if defined(__GNUC__) || defined(__clang__)
4059 /*
4060  * On GCC & Clang, marking 'dst16' as modified causes the compiler to:
4061  * - not extract the secret from SSE registers in the internal loop
4062  * - use fewer registers, avoiding pushes of these registers onto the stack
4063 */
4064 XXH_COMPILER_GUARD(dst16);
4065 # endif
4066 XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
4067 XXH_ASSERT(((size_t)dst16 & 15) == 0);
4068
4069 for (i=0; i < nbRounds; ++i) {
4070 dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
4071 } }
4072 }
4073
4074 #endif
4075
4076 #if (XXH_VECTOR == XXH_NEON)
4077
4078 /* forward declarations for the scalar routines */
4079 XXH_FORCE_INLINE void
4080 XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
4081 void const* XXH_RESTRICT secret, size_t lane);
4082
4083 XXH_FORCE_INLINE void
4084 XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
4085 void const* XXH_RESTRICT secret, size_t lane);
4086
4087 /*!
4088 * @internal
4089 * @brief The bulk processing loop for NEON.
4090 *
4091 * The NEON code path is actually partially scalar when running on AArch64. This
4092  * is to optimize the pipelining and can yield up to a 15% speedup depending on the
4093 * CPU, and it also mitigates some GCC codegen issues.
4094 *
4095 * @see XXH3_NEON_LANES for configuring this and details about this optimization.
4096 */
4097 XXH_FORCE_INLINE void
4098 XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
4099 const void* XXH_RESTRICT input,
4100 const void* XXH_RESTRICT secret)
4101 {
4102 XXH_ASSERT((((size_t)acc) & 15) == 0);
4103 XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
4104 {
4105 uint64x2_t* const xacc = (uint64x2_t *) acc;
4106 /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
4107 uint8_t const* const xinput = (const uint8_t *) input;
4108 uint8_t const* const xsecret = (const uint8_t *) secret;
4109
4110 size_t i;
4111 /* NEON for the first few lanes (these loops are normally interleaved) */
4112 for (i=0; i < XXH3_NEON_LANES / 2; i++) {
4113 /* data_vec = xinput[i]; */
4114 uint8x16_t data_vec = vld1q_u8(xinput + (i * 16));
4115 /* key_vec = xsecret[i]; */
4116 uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16));
4117 uint64x2_t data_key;
4118 uint32x2_t data_key_lo, data_key_hi;
4119 /* xacc[i] += swap(data_vec); */
4120 uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec);
4121 uint64x2_t const swapped = vextq_u64(data64, data64, 1);
4122 xacc[i] = vaddq_u64 (xacc[i], swapped);
4123 /* data_key = data_vec ^ key_vec; */
4124 data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
4125 /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
4126 * data_key_hi = (uint32x2_t) (data_key >> 32);
4127 * data_key = UNDEFINED; */
4128 XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
4129 /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
4130 xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi);
4131
4132 }
4133 /* Scalar for the remainder. This may be a zero iteration loop. */
4134 for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
4135 XXH3_scalarRound(acc, input, secret, i);
4136 }
4137 }
4138 }
4139
4140 XXH_FORCE_INLINE void
4141 XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4142 {
4143 XXH_ASSERT((((size_t)acc) & 15) == 0);
4144
4145 { uint64x2_t* xacc = (uint64x2_t*) acc;
4146 uint8_t const* xsecret = (uint8_t const*) secret;
4147 uint32x2_t prime = vdup_n_u32 (XXH_PRIME32_1);
4148
4149 size_t i;
4150 /* NEON for the first few lanes (these loops are normally interleaved) */
4151 for (i=0; i < XXH3_NEON_LANES / 2; i++) {
4152 /* xacc[i] ^= (xacc[i] >> 47); */
4153 uint64x2_t acc_vec = xacc[i];
4154 uint64x2_t shifted = vshrq_n_u64 (acc_vec, 47);
4155 uint64x2_t data_vec = veorq_u64 (acc_vec, shifted);
4156
4157 /* xacc[i] ^= xsecret[i]; */
4158 uint8x16_t key_vec = vld1q_u8 (xsecret + (i * 16));
4159 uint64x2_t data_key = veorq_u64 (data_vec, vreinterpretq_u64_u8(key_vec));
4160
4161 /* xacc[i] *= XXH_PRIME32_1 */
4162 uint32x2_t data_key_lo, data_key_hi;
4163 /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
4164 * data_key_hi = (uint32x2_t) (xacc[i] >> 32);
4165 * xacc[i] = UNDEFINED; */
4166 XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
4167 { /*
4168 * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
4169 *
4170 * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
4171 * incorrectly "optimize" this:
4172 * tmp = vmul_u32(vmovn_u64(a), vmovn_u64(b));
4173 * shifted = vshll_n_u32(tmp, 32);
4174 * to this:
4175 * tmp = "vmulq_u64"(a, b); // no such thing!
4176 * shifted = vshlq_n_u64(tmp, 32);
4177 *
4178 * However, unlike SSE, Clang lacks a 64-bit multiply routine
4179 * for NEON, and it scalarizes two 64-bit multiplies instead.
4180 *
4181 * vmull_u32 has the same timing as vmul_u32, and it avoids
4182 * this bug completely.
4183 * See https://bugs.llvm.org/show_bug.cgi?id=39967
4184 */
4185 uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
4186 /* xacc[i] = prod_hi << 32; */
4187 xacc[i] = vshlq_n_u64(prod_hi, 32);
4188 /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */
4189 xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
4190 }
4191 }
4192 /* Scalar for the remainder. This may be a zero iteration loop. */
4193 for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
4194 XXH3_scalarScrambleRound(acc, secret, i);
4195 }
4196 }
4197 }
4198
4199 #endif
4200
4201 #if (XXH_VECTOR == XXH_VSX)
4202
4203 XXH_FORCE_INLINE void
4204 XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc,
4205 const void* XXH_RESTRICT input,
4206 const void* XXH_RESTRICT secret)
4207 {
4208 /* presumed aligned */
4209 unsigned int* const xacc = (unsigned int*) acc;
4210 xxh_u64x2 const* const xinput = (xxh_u64x2 const*) input; /* no alignment restriction */
4211 xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret; /* no alignment restriction */
4212 xxh_u64x2 const v32 = { 32, 32 };
4213 size_t i;
4214 for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
4215 /* data_vec = xinput[i]; */
4216 xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
4217 /* key_vec = xsecret[i]; */
4218 xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
4219 xxh_u64x2 const data_key = data_vec ^ key_vec;
4220 /* shuffled = (data_key << 32) | (data_key >> 32); */
4221 xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
4222 /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
4223 xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
4224 /* acc_vec = xacc[i]; */
4225 xxh_u64x2 acc_vec = (xxh_u64x2)vec_xl(0, xacc + 4 * i);
4226 acc_vec += product;
4227
4228 /* swap high and low halves */
4229 #ifdef __s390x__
4230 acc_vec += vec_permi(data_vec, data_vec, 2);
4231 #else
4232 acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
4233 #endif
4234 /* xacc[i] = acc_vec; */
4235 vec_xst((xxh_u32x4)acc_vec, 0, xacc + 4 * i);
4236 }
4237 }
4238
4239 XXH_FORCE_INLINE void
4240 XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4241 {
4242 XXH_ASSERT((((size_t)acc) & 15) == 0);
4243
4244 { xxh_u64x2* const xacc = (xxh_u64x2*) acc;
4245 const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret;
4246 /* constants */
4247 xxh_u64x2 const v32 = { 32, 32 };
4248 xxh_u64x2 const v47 = { 47, 47 };
4249 xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
4250 size_t i;
4251 for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
4252 /* xacc[i] ^= (xacc[i] >> 47); */
4253 xxh_u64x2 const acc_vec = xacc[i];
4254 xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
4255
4256 /* xacc[i] ^= xsecret[i]; */
4257 xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
4258 xxh_u64x2 const data_key = data_vec ^ key_vec;
4259
4260 /* xacc[i] *= XXH_PRIME32_1 */
4261 /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
4262 xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
4263 /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
4264 xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
4265 xacc[i] = prod_odd + (prod_even << v32);
4266 } }
4267 }
4268
4269 #endif
4270
4271 /* scalar variants - universal */
4272
4273 /*!
4274 * @internal
4275 * @brief Scalar round for @ref XXH3_accumulate_512_scalar().
4276 *
4277 * This is extracted to its own function because the NEON path uses a combination
4278 * of NEON and scalar.
4279 */
4280 XXH_FORCE_INLINE void
4281 XXH3_scalarRound(void* XXH_RESTRICT acc,
4282 void const* XXH_RESTRICT input,
4283 void const* XXH_RESTRICT secret,
4284 size_t lane)
4285 {
4286 xxh_u64* xacc = (xxh_u64*) acc;
4287 xxh_u8 const* xinput = (xxh_u8 const*) input;
4288 xxh_u8 const* xsecret = (xxh_u8 const*) secret;
4289 XXH_ASSERT(lane < XXH_ACC_NB);
4290 XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
4291 {
4292 xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
4293 xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
4294 xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
4295 xacc[lane] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
4296 }
4297 }
4298
4299 /*!
4300 * @internal
4301 * @brief Processes a 64 byte block of data using the scalar path.
4302 */
4303 XXH_FORCE_INLINE void
4304 XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
4305 const void* XXH_RESTRICT input,
4306 const void* XXH_RESTRICT secret)
4307 {
4308 size_t i;
4309 for (i=0; i < XXH_ACC_NB; i++) {
4310 XXH3_scalarRound(acc, input, secret, i);
4311 }
4312 }
4313
4314 /*!
4315 * @internal
4316 * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
4317 *
4318 * This is extracted to its own function because the NEON path uses a combination
4319 * of NEON and scalar.
4320 */
4321 XXH_FORCE_INLINE void
4322 XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
4323 void const* XXH_RESTRICT secret,
4324 size_t lane)
4325 {
4326 xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
4327 const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
4328 XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
4329 XXH_ASSERT(lane < XXH_ACC_NB);
4330 {
4331 xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
4332 xxh_u64 acc64 = xacc[lane];
4333 acc64 = XXH_xorshift64(acc64, 47);
4334 acc64 ^= key64;
4335 acc64 *= XXH_PRIME32_1;
4336 xacc[lane] = acc64;
4337 }
4338 }
4339
4340 /*!
4341 * @internal
4342 * @brief Scrambles the accumulators after a large chunk has been read
4343 */
4344 XXH_FORCE_INLINE void
4345 XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4346 {
4347 size_t i;
4348 for (i=0; i < XXH_ACC_NB; i++) {
4349 XXH3_scalarScrambleRound(acc, secret, i);
4350 }
4351 }
4352
4353 XXH_FORCE_INLINE void
4354 XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4355 {
4356 /*
4357 * We need a separate pointer for the hack below,
4358 * which requires a non-const pointer.
4359 * Any decent compiler will optimize this out otherwise.
4360 */
4361 const xxh_u8* kSecretPtr = XXH3_kSecret;
4362 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
4363
4364 #if defined(__clang__) && defined(__aarch64__)
4365 /*
4366 * UGLY HACK:
4367 * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
4368 * placed sequentially, in order, at the top of the unrolled loop.
4369 *
4370 * While MOVK is great for generating constants (2 cycles for a 64-bit
4371 * constant compared to 4 cycles for LDR), it fights for bandwidth with
4372 * the arithmetic instructions.
4373 *
4374 * I L S
4375 * MOVK
4376 * MOVK
4377 * MOVK
4378 * MOVK
4379 * ADD
4380 * SUB STR
4381 * STR
4382 * By forcing loads from memory (as the asm line causes Clang to assume
4383  * that kSecretPtr has been changed), the pipelines are used more
4384 * efficiently:
4385 * I L S
4386 * LDR
4387 * ADD LDR
4388 * SUB STR
4389 * STR
4390 *
4391  * See XXH3_NEON_LANES for details on the pipeline.
4392 *
4393 * XXH3_64bits_withSeed, len == 256, Snapdragon 835
4394 * without hack: 2654.4 MB/s
4395 * with hack: 3202.9 MB/s
4396 */
4397 XXH_COMPILER_GUARD(kSecretPtr);
4398 #endif
4399 /*
4400 * Note: in debug mode, this overrides the asm optimization
4401 * and Clang will emit MOVK chains again.
4402 */
4403 XXH_ASSERT(kSecretPtr == XXH3_kSecret);
4404
4405 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
4406 int i;
4407 for (i=0; i < nbRounds; i++) {
4408 /*
4409 * The asm hack causes Clang to assume that kSecretPtr aliases with
4410 * customSecret, and on aarch64, this prevented LDP from merging two
4411 * loads together for free. Putting the loads together before the stores
4412 * properly generates LDP.
4413 */
4414 xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64;
4415 xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
4416 XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo);
4417 XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
4418 } }
4419 }
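/*
 * Property sketch (exposition aid, excluded from the build): each 16-byte
 * segment of the custom secret is the matching segment of XXH3_kSecret with
 * seed64 added to its low half and subtracted from its high half, so a zero
 * seed reproduces the default secret exactly:
 */
#if 0
XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 customSecret[XXH_SECRET_DEFAULT_SIZE];
XXH3_initCustomSecret_scalar(customSecret, 0);
/* => memcmp(customSecret, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE) == 0 */
#endif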
4420
4421
4422 typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*);
4423 typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
4424 typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
4425
4426
4427 #if (XXH_VECTOR == XXH_AVX512)
4428
4429 #define XXH3_accumulate_512 XXH3_accumulate_512_avx512
4430 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
4431 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
4432
4433 #elif (XXH_VECTOR == XXH_AVX2)
4434
4435 #define XXH3_accumulate_512 XXH3_accumulate_512_avx2
4436 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
4437 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
4438
4439 #elif (XXH_VECTOR == XXH_SSE2)
4440
4441 #define XXH3_accumulate_512 XXH3_accumulate_512_sse2
4442 #define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
4443 #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
4444
4445 #elif (XXH_VECTOR == XXH_NEON)
4446
4447 #define XXH3_accumulate_512 XXH3_accumulate_512_neon
4448 #define XXH3_scrambleAcc XXH3_scrambleAcc_neon
4449 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4450
4451 #elif (XXH_VECTOR == XXH_VSX)
4452
4453 #define XXH3_accumulate_512 XXH3_accumulate_512_vsx
4454 #define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
4455 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4456
4457 #else /* scalar */
4458
4459 #define XXH3_accumulate_512 XXH3_accumulate_512_scalar
4460 #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
4461 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4462
4463 #endif
4464
4465
4466
4467 #ifndef XXH_PREFETCH_DIST
4468 # ifdef __clang__
4469 # define XXH_PREFETCH_DIST 320
4470 # else
4471 # if (XXH_VECTOR == XXH_AVX512)
4472 # define XXH_PREFETCH_DIST 512
4473 # else
4474 # define XXH_PREFETCH_DIST 384
4475 # endif
4476 # endif /* __clang__ */
4477 #endif /* XXH_PREFETCH_DIST */
4478
4479 /*
4480 * XXH3_accumulate()
4481 * Loops over XXH3_accumulate_512().
4482 * Assumption: nbStripes will not overflow the secret size
4483 */
4484 XXH_FORCE_INLINE void
4485 XXH3_accumulate( xxh_u64* XXH_RESTRICT acc,
4486 const xxh_u8* XXH_RESTRICT input,
4487 const xxh_u8* XXH_RESTRICT secret,
4488 size_t nbStripes,
4489 XXH3_f_accumulate_512 f_acc512)
4490 {
4491 size_t n;
4492 for (n = 0; n < nbStripes; n++ ) {
4493 const xxh_u8* const in = input + n*XXH_STRIPE_LEN;
4494 XXH_PREFETCH(in + XXH_PREFETCH_DIST);
4495 f_acc512(acc,
4496 in,
4497 secret + n*XXH_SECRET_CONSUME_RATE);
4498 }
4499 }
4500
4501 XXH_FORCE_INLINE void
4502 XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
4503 const xxh_u8* XXH_RESTRICT input, size_t len,
4504 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
4505 XXH3_f_accumulate_512 f_acc512,
4506 XXH3_f_scrambleAcc f_scramble)
4507 {
4508 size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
4509 size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
4510 size_t const nb_blocks = (len - 1) / block_len;
4511
4512 size_t n;
4513
4514 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
4515
4516 for (n = 0; n < nb_blocks; n++) {
4517 XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512);
4518 f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
4519 }
4520
4521 /* last partial block */
4522 XXH_ASSERT(len > XXH_STRIPE_LEN);
4523 { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
4524 XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
4525 XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512);
4526
4527 /* last stripe */
4528 { const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
4529 #define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */
4530 f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
4531 } }
4532 }
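/*
 * Worked example of the loop geometry, assuming the default build constants
 * (XXH_SECRET_DEFAULT_SIZE == 192, XXH_STRIPE_LEN == 64,
 *  XXH_SECRET_CONSUME_RATE == 8):
 *   nbStripesPerBlock = (192 - 64) / 8 = 16 stripes
 *   block_len         = 64 * 16       = 1024 bytes,
 * i.e. the accumulators are scrambled once every 1024 input bytes.
 */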
4533
4534 XXH_FORCE_INLINE xxh_u64
4535 XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
4536 {
4537 return XXH3_mul128_fold64(
4538 acc[0] ^ XXH_readLE64(secret),
4539 acc[1] ^ XXH_readLE64(secret+8) );
4540 }
4541
4542 static XXH64_hash_t
4543 XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
4544 {
4545 xxh_u64 result64 = start;
4546 size_t i = 0;
4547
4548 for (i = 0; i < 4; i++) {
4549 result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
4550 #if defined(__clang__) /* Clang */ \
4551 && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \
4552 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
4553 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
4554 /*
4555 * UGLY HACK:
4556 * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
4557 * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
4558 * XXH3_64bits, len == 256, Snapdragon 835:
4559 * without hack: 2063.7 MB/s
4560 * with hack: 2560.7 MB/s
4561 */
4562 XXH_COMPILER_GUARD(result64);
4563 #endif
4564 }
4565
4566 return XXH3_avalanche(result64);
4567 }
4568
4569 #define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
4570 XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
4571
4572 XXH_FORCE_INLINE XXH64_hash_t
4573 XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
4574 const void* XXH_RESTRICT secret, size_t secretSize,
4575 XXH3_f_accumulate_512 f_acc512,
4576 XXH3_f_scrambleAcc f_scramble)
4577 {
4578 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
4579
4580 XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble);
4581
4582 /* converge into final hash */
4583 XXH_STATIC_ASSERT(sizeof(acc) == 64);
4584 /* do not align on 8, so that the secret is different from the accumulator */
4585 #define XXH_SECRET_MERGEACCS_START 11
4586 XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
4587 return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
4588 }
4589
4590 /*
4591  * It's important for performance to transmit the secret's size (when it's static)
4592  * so that the compiler can properly optimize the vectorized loop.
4593  * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
4594 */
4595 XXH_FORCE_INLINE XXH64_hash_t
4596 XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
4597 XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
4598 {
4599 (void)seed64;
4600 return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc);
4601 }
4602
4603 /*
4604 * It's preferable for performance that XXH3_hashLong is not inlined,
4605  * as it results in a smaller function for small data, easier on the instruction cache.
4606  * Note that inside this no_inline function, we do inline the internal loop,
4607  * and provide a statically defined secret size to allow optimization of the vector loop.
4608 */
4609 XXH_NO_INLINE XXH64_hash_t
4610 XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
4611 XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
4612 {
4613 (void)seed64; (void)secret; (void)secretLen;
4614 return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc);
4615 }
4616
4617 /*
4618 * XXH3_hashLong_64b_withSeed():
4619 * Generate a custom key based on alteration of default XXH3_kSecret with the seed,
4620 * and then use this key for long mode hashing.
4621 *
4622 * This operation is decently fast but nonetheless costs a little bit of time.
4623 * Try to avoid it whenever possible (typically when seed==0).
4624 *
4625 * It's important for performance that XXH3_hashLong is not inlined. Not sure
4626 * why (uop cache maybe?), but the difference is large and easily measurable.
4627 */
4628 XXH_FORCE_INLINE XXH64_hash_t
4629 XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
4630 XXH64_hash_t seed,
4631 XXH3_f_accumulate_512 f_acc512,
4632 XXH3_f_scrambleAcc f_scramble,
4633 XXH3_f_initCustomSecret f_initSec)
4634 {
4635 if (seed == 0)
4636 return XXH3_hashLong_64b_internal(input, len,
4637 XXH3_kSecret, sizeof(XXH3_kSecret),
4638 f_acc512, f_scramble);
4639 { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
4640 f_initSec(secret, seed);
4641 return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
4642 f_acc512, f_scramble);
4643 }
4644 }
4645
4646 /*
4647 * It's important for performance that XXH3_hashLong is not inlined.
4648 */
4649 XXH_NO_INLINE XXH64_hash_t
4650 XXH3_hashLong_64b_withSeed(const void* input, size_t len,
4651 XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen)
4652 {
4653 (void)secret; (void)secretLen;
4654 return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
4655 XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
4656 }
4657
4658
4659 typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
4660 XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
4661
4662 XXH_FORCE_INLINE XXH64_hash_t
4663 XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
4664 XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
4665 XXH3_hashLong64_f f_hashLong)
4666 {
4667 XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
4668 /*
4669  * If an action is to be taken when the `secretLen` condition is not respected,
4670  * it should be done here.
4671  * For now, it's a contract pre-condition.
4672  * Adding a check and a branch here would cost performance at every hash.
4673  * Also, note that the function signature doesn't offer room to return an error.
4674 */
4675 if (len <= 16)
4676 return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
4677 if (len <= 128)
4678 return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
4679 if (len <= XXH3_MIDSIZE_MAX)
4680 return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
4681 return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
4682 }
4683
4684
4685 /* === Public entry point === */
4686
4687 /*! @ingroup xxh3_family */
4688 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len)
4689 {
4690 return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
4691 }
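/*
 * Usage sketch (exposition aid, excluded from the build): one-shot hashing
 * with the default secret. The message is an arbitrary placeholder.
 */
#if 0
static const char msg[] = "example input";
XXH64_hash_t const h = XXH3_64bits(msg, sizeof(msg) - 1);
#endif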
4692
4693 /*! @ingroup xxh3_family */
4694 XXH_PUBLIC_API XXH64_hash_t
4695 XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
4696 {
4697 return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
4698 }
4699
4700 /*! @ingroup xxh3_family */
4701 XXH_PUBLIC_API XXH64_hash_t
4702 XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
4703 {
4704 return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
4705 }
4706
4707 XXH_PUBLIC_API XXH64_hash_t
4708 XXH3_64bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
4709 {
4710 if (len <= XXH3_MIDSIZE_MAX)
4711 return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
4712 return XXH3_hashLong_64b_withSecret(input, len, seed, (const xxh_u8*)secret, secretSize);
4713 }
4714
4715
4716 /* === XXH3 streaming === */
4717
4718 /*
4719  * Allocates memory that is always aligned to align.
4720 *
4721 * This must be freed with `XXH_alignedFree()`.
4722 *
4723 * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
4724 * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2
4725  * or, on 32-bit, the 16 byte aligned loads in SSE2 and NEON.
4726 *
4727 * This underalignment previously caused a rather obvious crash which went
4728 * completely unnoticed due to XXH3_createState() not actually being tested.
4729 * Credit to RedSpah for noticing this bug.
4730 *
4731  * The alignment is done manually: functions like posix_memalign or _mm_malloc
4732  * are avoided. To maintain portability, we would have to write a fallback
4733  * like this anyway, and besides, testing for the existence of library
4734  * functions without relying on external build tools is impossible.
4735 *
4736 * The method is simple: Overallocate, manually align, and store the offset
4737 * to the original behind the returned pointer.
4738 *
4739 * Align must be a power of 2 and 8 <= align <= 128.
4740 */
4741 static void* XXH_alignedMalloc(size_t s, size_t align)
4742 {
4743 XXH_ASSERT(align <= 128 && align >= 8); /* range check */
4744 XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */
4745 XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */
4746 { /* Overallocate to make room for manual realignment and an offset byte */
4747 xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
4748 if (base != NULL) {
4749 /*
4750 * Get the offset needed to align this pointer.
4751 *
4752          * Even if the pointer returned by XXH_malloc is already aligned,
4753          * offset is at least one byte, leaving room to store the offset to the original pointer.
4754 */
4755 size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
4756 /* Add the offset for the now-aligned pointer */
4757 xxh_u8* ptr = base + offset;
4758
4759 XXH_ASSERT((size_t)ptr % align == 0);
4760
4761 /* Store the offset immediately before the returned pointer. */
4762 ptr[-1] = (xxh_u8)offset;
4763 return ptr;
4764 }
4765 return NULL;
4766 }
4767 }
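/*
 * Layout sketch: with align == 64, if XXH_malloc() returns base ending in
 * 0x08, then offset = 64 - 8 = 56, ptr = base + 56 is 64-byte aligned, and
 * ptr[-1] == 56 lets XXH_alignedFree() recover base. If base is already
 * aligned, offset == align, so ptr[-1] remains valid.
 */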
4768 /*
4769 * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
4770 * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
4771 */
4772 static void XXH_alignedFree(void* p)
4773 {
4774 if (p != NULL) {
4775 xxh_u8* ptr = (xxh_u8*)p;
4776         /* Get the offset byte we added in XXH_alignedMalloc. */
4777 xxh_u8 offset = ptr[-1];
4778 /* Free the original malloc'd pointer */
4779 xxh_u8* base = ptr - offset;
4780 XXH_free(base);
4781 }
4782 }
4783 /*! @ingroup xxh3_family */
4784 XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
4785 {
4786 XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
4787 if (state==NULL) return NULL;
4788 XXH3_INITSTATE(state);
4789 return state;
4790 }
4791
4792 /*! @ingroup xxh3_family */
4793 XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
4794 {
4795 XXH_alignedFree(statePtr);
4796 return XXH_OK;
4797 }
4798
4799 /*! @ingroup xxh3_family */
4800 XXH_PUBLIC_API void
4801 XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state)
4802 {
4803 XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
4804 }
4805
4806 static void
4807 XXH3_reset_internal(XXH3_state_t* statePtr,
4808 XXH64_hash_t seed,
4809 const void* secret, size_t secretSize)
4810 {
4811 size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
4812 size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
4813 XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
4814 XXH_ASSERT(statePtr != NULL);
4815 /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
4816 memset((char*)statePtr + initStart, 0, initLength);
4817 statePtr->acc[0] = XXH_PRIME32_3;
4818 statePtr->acc[1] = XXH_PRIME64_1;
4819 statePtr->acc[2] = XXH_PRIME64_2;
4820 statePtr->acc[3] = XXH_PRIME64_3;
4821 statePtr->acc[4] = XXH_PRIME64_4;
4822 statePtr->acc[5] = XXH_PRIME32_2;
4823 statePtr->acc[6] = XXH_PRIME64_5;
4824 statePtr->acc[7] = XXH_PRIME32_1;
4825 statePtr->seed = seed;
4826 statePtr->useSeed = (seed != 0);
4827 statePtr->extSecret = (const unsigned char*)secret;
4828 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
4829 statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
4830 statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
4831 }
4832
4833 /*! @ingroup xxh3_family */
4834 XXH_PUBLIC_API XXH_errorcode
4835 XXH3_64bits_reset(XXH3_state_t* statePtr)
4836 {
4837 if (statePtr == NULL) return XXH_ERROR;
4838 XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
4839 return XXH_OK;
4840 }
4841
4842 /*! @ingroup xxh3_family */
4843 XXH_PUBLIC_API XXH_errorcode
4844 XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
4845 {
4846 if (statePtr == NULL) return XXH_ERROR;
4847 XXH3_reset_internal(statePtr, 0, secret, secretSize);
4848 if (secret == NULL) return XXH_ERROR;
4849 if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
4850 return XXH_OK;
4851 }
4852
4853 /*! @ingroup xxh3_family */
4854 XXH_PUBLIC_API XXH_errorcode
4855 XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
4856 {
4857 if (statePtr == NULL) return XXH_ERROR;
4858 if (seed==0) return XXH3_64bits_reset(statePtr);
4859 if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
4860 XXH3_initCustomSecret(statePtr->customSecret, seed);
4861 XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
4862 return XXH_OK;
4863 }
4864
4865 /*! @ingroup xxh3_family */
4866 XXH_PUBLIC_API XXH_errorcode
4867 XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed64)
4868 {
4869 if (statePtr == NULL) return XXH_ERROR;
4870 if (secret == NULL) return XXH_ERROR;
4871 if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
4872 XXH3_reset_internal(statePtr, seed64, secret, secretSize);
4873 statePtr->useSeed = 1; /* always, even if seed64==0 */
4874 return XXH_OK;
4875 }
4876
4877 /* Note : when XXH3_consumeStripes() is invoked,
4878  * there must be a guarantee that at least one more byte will be consumed from input afterwards,
4879  * so that the function can blindly consume all stripes using the "normal" secret segment */
4880 XXH_FORCE_INLINE void
4881 XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
4882 size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
4883 const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
4884 const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
4885 XXH3_f_accumulate_512 f_acc512,
4886 XXH3_f_scrambleAcc f_scramble)
4887 {
4888 XXH_ASSERT(nbStripes <= nbStripesPerBlock); /* can handle max 1 scramble per invocation */
4889 XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
4890 if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
4891 /* need a scrambling operation */
4892 size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
4893 size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
4894 XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512);
4895 f_scramble(acc, secret + secretLimit);
4896 XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512);
4897 *nbStripesSoFarPtr = nbStripesAfterBlock;
4898 } else {
4899 XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512);
4900 *nbStripesSoFarPtr += nbStripes;
4901 }
4902 }
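/*
 * Worked example, assuming the default secret (nbStripesPerBlock == 16):
 * with *nbStripesSoFarPtr == 14 and nbStripes == 5, the scrambling branch runs:
 *   nbStripesToEndofBlock = 16 - 14 = 2   (finish the block, then scramble)
 *   nbStripesAfterBlock   =  5 -  2 = 3   (restart from the secret's start)
 * and *nbStripesSoFarPtr ends up at 3.
 */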
4903
4904 #ifndef XXH3_STREAM_USE_STACK
4905 # ifndef __clang__ /* clang doesn't need additional stack space */
4906 # define XXH3_STREAM_USE_STACK 1
4907 # endif
4908 #endif
4909 /*
4910 * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
4911 */
4912 XXH_FORCE_INLINE XXH_errorcode
4913 XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
4914 const xxh_u8* XXH_RESTRICT input, size_t len,
4915 XXH3_f_accumulate_512 f_acc512,
4916 XXH3_f_scrambleAcc f_scramble)
4917 {
4918 if (input==NULL) {
4919 XXH_ASSERT(len == 0);
4920 return XXH_OK;
4921 }
4922
4923 XXH_ASSERT(state != NULL);
4924 { const xxh_u8* const bEnd = input + len;
4925 const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
4926 #if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
4927 /* For some reason, gcc and MSVC seem to suffer greatly
4928  * when operating on accumulators directly in state.
4929  * Operating on stack space seems to enable proper optimization.
4930  * clang, on the other hand, doesn't seem to need this trick */
4931 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc, sizeof(acc));
4932 #else
4933 xxh_u64* XXH_RESTRICT const acc = state->acc;
4934 #endif
4935 state->totalLen += len;
4936 XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
4937
4938 /* small input : just fill in tmp buffer */
4939 if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) {
4940 XXH_memcpy(state->buffer + state->bufferedSize, input, len);
4941 state->bufferedSize += (XXH32_hash_t)len;
4942 return XXH_OK;
4943 }
4944
4945 /* total input is now > XXH3_INTERNALBUFFER_SIZE */
4946 #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
4947 XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */
4948
4949 /*
4950 * Internal buffer is partially filled (always, except at beginning)
4951 * Complete it, then consume it.
4952 */
4953 if (state->bufferedSize) {
4954 size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
4955 XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
4956 input += loadSize;
4957 XXH3_consumeStripes(acc,
4958 &state->nbStripesSoFar, state->nbStripesPerBlock,
4959 state->buffer, XXH3_INTERNALBUFFER_STRIPES,
4960 secret, state->secretLimit,
4961 f_acc512, f_scramble);
4962 state->bufferedSize = 0;
4963 }
4964 XXH_ASSERT(input < bEnd);
4965
4966 /* large input to consume : ingest per full block */
4967 if ((size_t)(bEnd - input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) {
4968 size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
4969 XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar);
4970 /* join to current block's end */
4971 { size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar;
4972 XXH_ASSERT(nbStripesToEnd <= nbStripes);
4973 XXH3_accumulate(acc, input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd, f_acc512);
4974 f_scramble(acc, secret + state->secretLimit);
4975 state->nbStripesSoFar = 0;
4976 input += nbStripesToEnd * XXH_STRIPE_LEN;
4977 nbStripes -= nbStripesToEnd;
4978 }
4979 /* consume per entire blocks */
4980 while(nbStripes >= state->nbStripesPerBlock) {
4981 XXH3_accumulate(acc, input, secret, state->nbStripesPerBlock, f_acc512);
4982 f_scramble(acc, secret + state->secretLimit);
4983 input += state->nbStripesPerBlock * XXH_STRIPE_LEN;
4984 nbStripes -= state->nbStripesPerBlock;
4985 }
4986 /* consume last partial block */
4987 XXH3_accumulate(acc, input, secret, nbStripes, f_acc512);
4988 input += nbStripes * XXH_STRIPE_LEN;
4989 XXH_ASSERT(input < bEnd); /* at least some bytes left */
4990 state->nbStripesSoFar = nbStripes;
4991 /* buffer predecessor of last partial stripe */
4992 XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
4993 XXH_ASSERT(bEnd - input <= XXH_STRIPE_LEN);
4994 } else {
4995 /* content to consume <= block size */
4996 /* Consume input by a multiple of internal buffer size */
4997 if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
4998 const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
4999 do {
5000 XXH3_consumeStripes(acc,
5001 &state->nbStripesSoFar, state->nbStripesPerBlock,
5002 input, XXH3_INTERNALBUFFER_STRIPES,
5003 secret, state->secretLimit,
5004 f_acc512, f_scramble);
5005 input += XXH3_INTERNALBUFFER_SIZE;
5006 } while (input<limit);
5007 /* buffer predecessor of last partial stripe */
5008 XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
5009 }
5010 }
5011
5012 /* Some remaining input (always) : buffer it */
5013 XXH_ASSERT(input < bEnd);
5014 XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
5015 XXH_ASSERT(state->bufferedSize == 0);
5016 XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
5017 state->bufferedSize = (XXH32_hash_t)(bEnd-input);
5018 #if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
5019 /* save stack accumulators into state */
5020 memcpy(state->acc, acc, sizeof(acc));
5021 #endif
5022 }
5023
5024 return XXH_OK;
5025 }
5026
5027 /*! @ingroup xxh3_family */
5028 XXH_PUBLIC_API XXH_errorcode
5029 XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
5030 {
5031 return XXH3_update(state, (const xxh_u8*)input, len,
5032 XXH3_accumulate_512, XXH3_scrambleAcc);
5033 }
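/*
 * Streaming usage sketch (exposition aid, excluded from the build): feed
 * input in arbitrary chunks, then digest. Error handling is omitted, and
 * data1/len1/data2/len2 are placeholders.
 */
#if 0
XXH3_state_t* const st = XXH3_createState();
(void)XXH3_64bits_reset(st);
(void)XXH3_64bits_update(st, data1, len1);
(void)XXH3_64bits_update(st, data2, len2);
{   XXH64_hash_t const h = XXH3_64bits_digest(st);  /* state remains usable */
    (void)h;
}
(void)XXH3_freeState(st);
#endif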
5034
5035
5036 XXH_FORCE_INLINE void
5037 XXH3_digest_long (XXH64_hash_t* acc,
5038 const XXH3_state_t* state,
5039 const unsigned char* secret)
5040 {
5041 /*
5042 * Digest on a local copy. This way, the state remains unaltered, and it can
5043 * continue ingesting more input afterwards.
5044 */
5045 XXH_memcpy(acc, state->acc, sizeof(state->acc));
5046 if (state->bufferedSize >= XXH_STRIPE_LEN) {
5047 size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
5048 size_t nbStripesSoFar = state->nbStripesSoFar;
5049 XXH3_consumeStripes(acc,
5050 &nbStripesSoFar, state->nbStripesPerBlock,
5051 state->buffer, nbStripes,
5052 secret, state->secretLimit,
5053 XXH3_accumulate_512, XXH3_scrambleAcc);
5054 /* last stripe */
5055 XXH3_accumulate_512(acc,
5056 state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
5057 secret + state->secretLimit - XXH_SECRET_LASTACC_START);
5058 } else { /* bufferedSize < XXH_STRIPE_LEN */
5059 xxh_u8 lastStripe[XXH_STRIPE_LEN];
5060 size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
5061 XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */
5062 XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
5063 XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
5064 XXH3_accumulate_512(acc,
5065 lastStripe,
5066 secret + state->secretLimit - XXH_SECRET_LASTACC_START);
5067 }
5068 }
5069
5070 /*! @ingroup xxh3_family */
5071 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
5072 {
5073 const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
5074 if (state->totalLen > XXH3_MIDSIZE_MAX) {
5075 XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
5076 XXH3_digest_long(acc, state, secret);
5077 return XXH3_mergeAccs(acc,
5078 secret + XXH_SECRET_MERGEACCS_START,
5079 (xxh_u64)state->totalLen * XXH_PRIME64_1);
5080 }
5081 /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
5082 if (state->useSeed)
5083 return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
5084 return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
5085 secret, state->secretLimit + XXH_STRIPE_LEN);
5086 }
5087
5088
5089
5090 /* ==========================================
5091 * XXH3 128 bits (a.k.a XXH128)
5092 * ==========================================
5093 * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
5094 * even without counting the significantly larger output size.
5095 *
5096 * For example, extra steps are taken to avoid the seed-dependent collisions
5097 * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
5098 *
5099  * This strength naturally comes at the cost of some speed, especially on short
5100  * lengths. Note that on long inputs it is about as fast as the 64-bit version,
5101  * as it uses only a slight modification of the 64-bit loop.
5102 *
5103 * XXH128 is also more oriented towards 64-bit machines. It is still extremely
5104 * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
5105 */
5106
5107 XXH_FORCE_INLINE XXH128_hash_t
5108 XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5109 {
5110 /* A doubled version of 1to3_64b with different constants. */
5111 XXH_ASSERT(input != NULL);
5112 XXH_ASSERT(1 <= len && len <= 3);
5113 XXH_ASSERT(secret != NULL);
5114 /*
5115 * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
5116 * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
5117 * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
5118 */
5119 { xxh_u8 const c1 = input[0];
5120 xxh_u8 const c2 = input[len >> 1];
5121 xxh_u8 const c3 = input[len - 1];
5122 xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
5123 | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
5124 xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
5125 xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
5126 xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
5127 xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
5128 xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
5129 XXH128_hash_t h128;
5130 h128.low64 = XXH64_avalanche(keyed_lo);
5131 h128.high64 = XXH64_avalanche(keyed_hi);
5132 return h128;
5133 }
5134 }
5135
5136 XXH_FORCE_INLINE XXH128_hash_t
5137 XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5138 {
5139 XXH_ASSERT(input != NULL);
5140 XXH_ASSERT(secret != NULL);
5141 XXH_ASSERT(4 <= len && len <= 8);
5142 seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
5143 { xxh_u32 const input_lo = XXH_readLE32(input);
5144 xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
5145 xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
5146 xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
5147 xxh_u64 const keyed = input_64 ^ bitflip;
5148
5149         /* Shift len left so that (len << 2) is even: added to the odd XXH_PRIME64_1, it keeps the multiplier odd, avoiding even multiplies. */
5150 XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
5151
5152 m128.high64 += (m128.low64 << 1);
5153 m128.low64 ^= (m128.high64 >> 3);
5154
5155 m128.low64 = XXH_xorshift64(m128.low64, 35);
5156 m128.low64 *= 0x9FB21C651E98DF25ULL;
5157 m128.low64 = XXH_xorshift64(m128.low64, 28);
5158 m128.high64 = XXH3_avalanche(m128.high64);
5159 return m128;
5160 }
5161 }
5162
5163 XXH_FORCE_INLINE XXH128_hash_t
5164 XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5165 {
5166 XXH_ASSERT(input != NULL);
5167 XXH_ASSERT(secret != NULL);
5168 XXH_ASSERT(9 <= len && len <= 16);
5169 { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
5170 xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
5171 xxh_u64 const input_lo = XXH_readLE64(input);
5172 xxh_u64 input_hi = XXH_readLE64(input + len - 8);
5173 XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
5174 /*
5175 * Put len in the middle of m128 to ensure that the length gets mixed to
5176 * both the low and high bits in the 128x64 multiply below.
5177 */
5178 m128.low64 += (xxh_u64)(len - 1) << 54;
5179 input_hi ^= bitfliph;
5180 /*
5181 * Add the high 32 bits of input_hi to the high 32 bits of m128, then
5182 * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
5183 * the high 64 bits of m128.
5184 *
5185 * The best approach to this operation is different on 32-bit and 64-bit.
5186 */
5187 if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
5188 /*
5189 * 32-bit optimized version, which is more readable.
5190 *
5191 * On 32-bit, it removes an ADC and delays a dependency between the two
5192 * halves of m128.high64, but it generates an extra mask on 64-bit.
5193 */
5194 m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
5195 } else {
5196 /*
5197 * 64-bit optimized (albeit more confusing) version.
5198 *
5199 * Uses some properties of addition and multiplication to remove the mask:
5200 *
5201 * Let:
5202 * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
5203 * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
5204 * c = XXH_PRIME32_2
5205 *
5206 * a + (b * c)
5207 * Inverse Property: x + y - x == y
5208 * a + (b * (1 + c - 1))
5209 * Distributive Property: x * (y + z) == (x * y) + (x * z)
5210 * a + (b * 1) + (b * (c - 1))
5211 * Identity Property: x * 1 == x
5212 * a + b + (b * (c - 1))
5213 *
5214 * Substitute a, b, and c:
5215 * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
5216 *
5217 * Since input_hi.hi + input_hi.lo == input_hi, we get this:
5218 * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
5219 */
5220 m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
5221 }
5222 /* m128 ^= XXH_swap64(m128 >> 64); */
5223 m128.low64 ^= XXH_swap64(m128.high64);
5224
5225 { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
5226 XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
5227 h128.high64 += m128.high64 * XXH_PRIME64_2;
5228
5229 h128.low64 = XXH3_avalanche(h128.low64);
5230 h128.high64 = XXH3_avalanche(h128.high64);
5231 return h128;
5232 } }
5233 }
5234
5235 /*
5236 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
5237 */
5238 XXH_FORCE_INLINE XXH128_hash_t
5239 XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5240 {
5241 XXH_ASSERT(len <= 16);
5242 { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
5243 if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
5244 if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
5245 { XXH128_hash_t h128;
5246 xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
5247 xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
5248 h128.low64 = XXH64_avalanche(seed ^ bitflipl);
5249 h128.high64 = XXH64_avalanche( seed ^ bitfliph);
5250 return h128;
5251 } }
5252 }
5253
5254 /*
5255 * A bit slower than XXH3_mix16B, but handles multiply by zero better.
5256 */
5257 XXH_FORCE_INLINE XXH128_hash_t
5258 XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
5259 const xxh_u8* secret, XXH64_hash_t seed)
5260 {
5261 acc.low64 += XXH3_mix16B (input_1, secret+0, seed);
5262 acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
5263 acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
5264 acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
5265 return acc;
5266 }
5267
5268
5269 XXH_FORCE_INLINE XXH128_hash_t
5270 XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
5271 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5272 XXH64_hash_t seed)
5273 {
5274 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
5275 XXH_ASSERT(16 < len && len <= 128);
5276
5277 { XXH128_hash_t acc;
5278 acc.low64 = len * XXH_PRIME64_1;
5279 acc.high64 = 0;
5280 if (len > 32) {
5281 if (len > 64) {
5282 if (len > 96) {
5283 acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
5284 }
5285 acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
5286 }
5287 acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
5288 }
5289 acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
5290 { XXH128_hash_t h128;
5291 h128.low64 = acc.low64 + acc.high64;
5292 h128.high64 = (acc.low64 * XXH_PRIME64_1)
5293 + (acc.high64 * XXH_PRIME64_4)
5294 + ((len - seed) * XXH_PRIME64_2);
5295 h128.low64 = XXH3_avalanche(h128.low64);
5296 h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
5297 return h128;
5298 }
5299 }
5300 }
5301
5302 XXH_NO_INLINE XXH128_hash_t
5303 XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
5304 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5305 XXH64_hash_t seed)
5306 {
5307 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
5308 XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
5309
5310 { XXH128_hash_t acc;
5311 int const nbRounds = (int)len / 32;
5312 int i;
5313 acc.low64 = len * XXH_PRIME64_1;
5314 acc.high64 = 0;
5315 for (i=0; i<4; i++) {
5316 acc = XXH128_mix32B(acc,
5317 input + (32 * i),
5318 input + (32 * i) + 16,
5319 secret + (32 * i),
5320 seed);
5321 }
5322 acc.low64 = XXH3_avalanche(acc.low64);
5323 acc.high64 = XXH3_avalanche(acc.high64);
5324 XXH_ASSERT(nbRounds >= 4);
5325 for (i=4 ; i < nbRounds; i++) {
5326 acc = XXH128_mix32B(acc,
5327 input + (32 * i),
5328 input + (32 * i) + 16,
5329 secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
5330 seed);
5331 }
5332 /* last bytes */
5333 acc = XXH128_mix32B(acc,
5334 input + len - 16,
5335 input + len - 32,
5336 secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
5337 0ULL - seed);
5338
5339 { XXH128_hash_t h128;
5340 h128.low64 = acc.low64 + acc.high64;
5341 h128.high64 = (acc.low64 * XXH_PRIME64_1)
5342 + (acc.high64 * XXH_PRIME64_4)
5343 + ((len - seed) * XXH_PRIME64_2);
5344 h128.low64 = XXH3_avalanche(h128.low64);
5345 h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
5346 return h128;
5347 }
5348 }
5349 }
5350
5351 XXH_FORCE_INLINE XXH128_hash_t
5352 XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
5353 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5354 XXH3_f_accumulate_512 f_acc512,
5355 XXH3_f_scrambleAcc f_scramble)
5356 {
5357 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
5358
5359 XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble);
5360
5361 /* converge into final hash */
5362 XXH_STATIC_ASSERT(sizeof(acc) == 64);
5363 XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
5364 { XXH128_hash_t h128;
5365 h128.low64 = XXH3_mergeAccs(acc,
5366 secret + XXH_SECRET_MERGEACCS_START,
5367 (xxh_u64)len * XXH_PRIME64_1);
5368 h128.high64 = XXH3_mergeAccs(acc,
5369 secret + secretSize
5370 - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
5371 ~((xxh_u64)len * XXH_PRIME64_2));
5372 return h128;
5373 }
5374 }
5375
5376 /*
5377 * It's important for performance that XXH3_hashLong is not inlined.
5378 */
5379 XXH_NO_INLINE XXH128_hash_t
5380 XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
5381 XXH64_hash_t seed64,
5382 const void* XXH_RESTRICT secret, size_t secretLen)
5383 {
5384 (void)seed64; (void)secret; (void)secretLen;
5385 return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
5386 XXH3_accumulate_512, XXH3_scrambleAcc);
5387 }
5388
5389 /*
5390 * It's important for performance to pass @secretLen (when it's static)
5391 * to the compiler, so that it can properly optimize the vectorized loop.
5392 */
5393 XXH_FORCE_INLINE XXH128_hash_t
5394 XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
5395 XXH64_hash_t seed64,
5396 const void* XXH_RESTRICT secret, size_t secretLen)
5397 {
5398 (void)seed64;
5399 return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
5400 XXH3_accumulate_512, XXH3_scrambleAcc);
5401 }
5402
5403 XXH_FORCE_INLINE XXH128_hash_t
5404 XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
5405 XXH64_hash_t seed64,
5406 XXH3_f_accumulate_512 f_acc512,
5407 XXH3_f_scrambleAcc f_scramble,
5408 XXH3_f_initCustomSecret f_initSec)
5409 {
5410 if (seed64 == 0)
5411 return XXH3_hashLong_128b_internal(input, len,
5412 XXH3_kSecret, sizeof(XXH3_kSecret),
5413 f_acc512, f_scramble);
5414 { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
5415 f_initSec(secret, seed64);
5416 return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
5417 f_acc512, f_scramble);
5418 }
5419 }
5420
5421 /*
5422 * It's important for performance that XXH3_hashLong is not inlined.
5423 */
5424 XXH_NO_INLINE XXH128_hash_t
5425 XXH3_hashLong_128b_withSeed(const void* input, size_t len,
5426 XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
5427 {
5428 (void)secret; (void)secretLen;
5429 return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
5430 XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
5431 }
5432
5433 typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
5434 XXH64_hash_t, const void* XXH_RESTRICT, size_t);
5435
5436 XXH_FORCE_INLINE XXH128_hash_t
5437 XXH3_128bits_internal(const void* input, size_t len,
5438 XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
5439 XXH3_hashLong128_f f_hl128)
5440 {
5441 XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
5442 /*
5443  * If an action is to be taken when the `secret` conditions are not respected,
5444 * it should be done here.
5445 * For now, it's a contract pre-condition.
5446 * Adding a check and a branch here would cost performance at every hash.
5447 */
5448 if (len <= 16)
5449 return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
5450 if (len <= 128)
5451 return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
5452 if (len <= XXH3_MIDSIZE_MAX)
5453 return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
5454 return f_hl128(input, len, seed64, secret, secretLen);
5455 }
5456
5457
5458 /* === Public XXH128 API === */
5459
5460 /*! @ingroup xxh3_family */
5461 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len)
5462 {
5463 return XXH3_128bits_internal(input, len, 0,
5464 XXH3_kSecret, sizeof(XXH3_kSecret),
5465 XXH3_hashLong_128b_default);
5466 }
5467
5468 /*! @ingroup xxh3_family */
5469 XXH_PUBLIC_API XXH128_hash_t
5470 XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
5471 {
5472 return XXH3_128bits_internal(input, len, 0,
5473 (const xxh_u8*)secret, secretSize,
5474 XXH3_hashLong_128b_withSecret);
5475 }
5476
5477 /*! @ingroup xxh3_family */
5478 XXH_PUBLIC_API XXH128_hash_t
5479 XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
5480 {
5481 return XXH3_128bits_internal(input, len, seed,
5482 XXH3_kSecret, sizeof(XXH3_kSecret),
5483 XXH3_hashLong_128b_withSeed);
5484 }
5485
5486 /*! @ingroup xxh3_family */
5487 XXH_PUBLIC_API XXH128_hash_t
5488 XXH3_128bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
5489 {
5490 if (len <= XXH3_MIDSIZE_MAX)
5491 return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
5492 return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
5493 }
5494
5495 /*! @ingroup xxh3_family */
5496 XXH_PUBLIC_API XXH128_hash_t
5497 XXH128(const void* input, size_t len, XXH64_hash_t seed)
5498 {
5499 return XXH3_128bits_withSeed(input, len, seed);
5500 }
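/*
 * Usage sketch (exposition aid, excluded from the build): seeded one-shot
 * 128-bit hash. buf and bufSize are placeholders.
 */
#if 0
XXH128_hash_t const h = XXH128(buf, bufSize, 2654435761U /* seed */);
/* h.low64 and h.high64 hold the two 64-bit halves of the result */
#endif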
5501
5502
5503 /* === XXH3 128-bit streaming === */
5504
5505 /*
5506  * All initialization and update functions are identical to the 64-bit streaming variant.
5507 * The only difference is the finalization routine.
5508 */
5509
5510 /*! @ingroup xxh3_family */
5511 XXH_PUBLIC_API XXH_errorcode
5512 XXH3_128bits_reset(XXH3_state_t* statePtr)
5513 {
5514 return XXH3_64bits_reset(statePtr);
5515 }
5516
5517 /*! @ingroup xxh3_family */
5518 XXH_PUBLIC_API XXH_errorcode
5519 XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
5520 {
5521 return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
5522 }
5523
5524 /*! @ingroup xxh3_family */
5525 XXH_PUBLIC_API XXH_errorcode
5526 XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
5527 {
5528 return XXH3_64bits_reset_withSeed(statePtr, seed);
5529 }
5530
5531 /*! @ingroup xxh3_family */
5532 XXH_PUBLIC_API XXH_errorcode
5533 XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed)
5534 {
5535 return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
5536 }
5537
5538 /*! @ingroup xxh3_family */
5539 XXH_PUBLIC_API XXH_errorcode
5540 XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len)
5541 {
5542 return XXH3_update(state, (const xxh_u8*)input, len,
5543 XXH3_accumulate_512, XXH3_scrambleAcc);
5544 }
5545
5546 /*! @ingroup xxh3_family */
5547 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
5548 {
5549 const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
5550 if (state->totalLen > XXH3_MIDSIZE_MAX) {
5551 XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
5552 XXH3_digest_long(acc, state, secret);
5553 XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
5554 { XXH128_hash_t h128;
5555 h128.low64 = XXH3_mergeAccs(acc,
5556 secret + XXH_SECRET_MERGEACCS_START,
5557 (xxh_u64)state->totalLen * XXH_PRIME64_1);
5558 h128.high64 = XXH3_mergeAccs(acc,
5559 secret + state->secretLimit + XXH_STRIPE_LEN
5560 - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
5561 ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
5562 return h128;
5563 }
5564 }
5565 /* len <= XXH3_MIDSIZE_MAX : short code */
5566 if (state->seed)
5567 return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
5568 return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
5569 secret, state->secretLimit + XXH_STRIPE_LEN);
5570 }
5571
5572 /* 128-bit utility functions */
5573
5574 #include <string.h> /* memcmp, memcpy */
5575
5576 /* return : 1 if equal, 0 if different */
5577 /*! @ingroup xxh3_family */
5578 XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
5579 {
5580 /* note : XXH128_hash_t is compact, it has no padding byte */
5581 return !(memcmp(&h1, &h2, sizeof(h1)));
5582 }
5583
5584 /* This prototype is compatible with stdlib's qsort().
5585 * return : >0 if *h128_1 > *h128_2
5586 * <0 if *h128_1 < *h128_2
5587 * =0 if *h128_1 == *h128_2 */
5588 /*! @ingroup xxh3_family */
5589 XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2)
5590 {
5591 XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
5592 XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
5593 int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
5594 /* note : bets that, in most cases, hash values are different */
5595 if (hcmp) return hcmp;
5596 return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
5597 }
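/*
 * Usage sketch (exposition aid, excluded from the build): since the prototype
 * matches qsort()'s comparator, an array of hashes sorts directly.
 * hashes and nbHashes are placeholders; qsort() requires <stdlib.h>.
 */
#if 0
qsort(hashes, nbHashes, sizeof(XXH128_hash_t), XXH128_cmp);
#endif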
5598
5599
5600 /*====== Canonical representation ======*/
5601 /*! @ingroup xxh3_family */
5602 XXH_PUBLIC_API void
5603 XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash)
5604 {
5605 XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
5606 if (XXH_CPU_LITTLE_ENDIAN) {
5607 hash.high64 = XXH_swap64(hash.high64);
5608 hash.low64 = XXH_swap64(hash.low64);
5609 }
5610 XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
5611 XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
5612 }
5613
5614 /*! @ingroup xxh3_family */
5615 XXH_PUBLIC_API XXH128_hash_t
5616 XXH128_hashFromCanonical(const XXH128_canonical_t* src)
5617 {
5618 XXH128_hash_t h;
5619 h.high64 = XXH_readBE64(src);
5620 h.low64 = XXH_readBE64(src->digest + 8);
5621 return h;
5622 }
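/*
 * Round-trip sketch (exposition aid, excluded from the build): the canonical
 * form stores high64 first, big-endian, so it is byte-order independent.
 */
#if 0
XXH128_canonical_t canonical;
XXH128_canonicalFromHash(&canonical, h);   /* h: any XXH128_hash_t */
{   XXH128_hash_t const back = XXH128_hashFromCanonical(&canonical);
    /* => XXH128_isEqual(h, back) == 1 */
    (void)back;
}
#endif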
5623
5624
5625
5626 /* ==========================================
5627 * Secret generators
5628 * ==========================================
5629 */
5630 #define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
5631
5632 XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
5633 {
5634 XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
5635 XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
5636 }
5637
5638 /*! @ingroup xxh3_family */
5639 XXH_PUBLIC_API XXH_errorcode
5640 XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize)
5641 {
5642 #if (XXH_DEBUGLEVEL >= 1)
5643 XXH_ASSERT(secretBuffer != NULL);
5644 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
5645 #else
5646     /* production mode, asserts are disabled */
5647 if (secretBuffer == NULL) return XXH_ERROR;
5648 if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
5649 #endif
5650
5651 if (customSeedSize == 0) {
5652 customSeed = XXH3_kSecret;
5653 customSeedSize = XXH_SECRET_DEFAULT_SIZE;
5654 }
5655 #if (XXH_DEBUGLEVEL >= 1)
5656 XXH_ASSERT(customSeed != NULL);
5657 #else
5658 if (customSeed == NULL) return XXH_ERROR;
5659 #endif
5660
5661 /* Fill secretBuffer with a copy of customSeed - repeat as needed */
5662 { size_t pos = 0;
5663 while (pos < secretSize) {
5664 size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
5665 memcpy((char*)secretBuffer + pos, customSeed, toCopy);
5666 pos += toCopy;
5667 } }
5668
5669 { size_t const nbSeg16 = secretSize / 16;
5670 size_t n;
5671 XXH128_canonical_t scrambler;
5672 XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
5673 for (n=0; n<nbSeg16; n++) {
5674 XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
5675 XXH3_combine16((char*)secretBuffer + n*16, h128);
5676 }
5677 /* last segment */
5678 XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
5679 }
5680 return XXH_OK;
5681 }
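/*
 * Usage sketch (exposition aid, excluded from the build): derive a full-size
 * secret from a low-entropy passphrase, then hash with it. data and size are
 * placeholders.
 */
#if 0
xxh_u8 secret[XXH3_SECRET_SIZE_MIN];
if (XXH3_generateSecret(secret, sizeof(secret), "my passphrase", 13) == XXH_OK) {
    XXH64_hash_t const h = XXH3_64bits_withSecret(data, size, secret, sizeof(secret));
    (void)h;
}
#endif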
5682
5683 /*! @ingroup xxh3_family */
5684 XXH_PUBLIC_API void
5685 XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed)
5686 {
5687 XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
5688 XXH3_initCustomSecret(secret, seed);
5689 XXH_ASSERT(secretBuffer != NULL);
5690 memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
5691 }
5692
5693
5694
5695 /* Pop our optimization override from above */
5696 #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
5697 && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
5698 && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
5699 # pragma GCC pop_options
5700 #endif
5701
5702 #endif /* XXH_NO_LONG_LONG */
5703
5704 #endif /* XXH_NO_XXH3 */
5705
5706 /*!
5707 * @}
5708 */
5709 #endif /* XXH_IMPLEMENTATION */
5710
5711
5712 #if defined (__cplusplus)
5713 }
5714 #endif
5715